[llvm] [WIP][DAG] Add legalization handling for ABDS/ABDU (PR #92576)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 4 03:59:48 PDT 2024
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/92576
>From d6b75401c6ae99bdbf98648260e840490266d082 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 17 May 2024 17:47:58 +0100
Subject: [PATCH] [WIP][DAG] Add legalization handling for ABDS/ABDU
Still WIP, but I wanted to give other teams some early visibility.
Always match ABD patterns pre-legalization, and use TargetLowering::expandABD to expand the nodes back out during legalization when they aren't legal for the target.
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 14 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 9 +
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 +
.../SelectionDAG/LegalizeVectorTypes.cpp | 6 +
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2 +
.../CodeGen/SelectionDAG/TargetLowering.cpp | 18 +-
llvm/test/CodeGen/AArch64/arm64-csel.ll | 4 +-
llvm/test/CodeGen/AArch64/arm64-vabs.ll | 31 +-
llvm/test/CodeGen/AArch64/neon-abd.ll | 77 +-
llvm/test/CodeGen/AArch64/sve-aba.ll | 37 +-
llvm/test/CodeGen/AArch64/sve-abd.ll | 29 +-
llvm/test/CodeGen/AMDGPU/sad.ll | 6 +-
llvm/test/CodeGen/ARM/iabs.ll | 2 +-
llvm/test/CodeGen/ARM/neon_vabs.ll | 10 +-
llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll | 1314 ++++++++---------
llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll | 24 +-
llvm/test/CodeGen/RISCV/rvv/abd.ll | 20 +-
llvm/test/CodeGen/Thumb2/mve-vabdus.ll | 91 +-
llvm/test/CodeGen/X86/abds.ll | 99 +-
llvm/test/CodeGen/X86/abdu.ll | 99 +-
20 files changed, 909 insertions(+), 984 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 5148b7258257f..bc57377f924df 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4140,13 +4140,13 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
}
// smax(a,b) - smin(a,b) --> abds(a,b)
- if (hasOperation(ISD::ABDS, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::ABDS, VT)) &&
sd_match(N0, m_SMax(m_Value(A), m_Value(B))) &&
sd_match(N1, m_SMin(m_Specific(A), m_Specific(B))))
return DAG.getNode(ISD::ABDS, DL, VT, A, B);
// umax(a,b) - umin(a,b) --> abdu(a,b)
- if (hasOperation(ISD::ABDU, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::ABDU, VT)) &&
sd_match(N0, m_UMax(m_Value(A), m_Value(B))) &&
sd_match(N1, m_UMin(m_Specific(A), m_Specific(B))))
return DAG.getNode(ISD::ABDU, DL, VT, A, B);
@@ -10942,7 +10942,8 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
(Opc0 != ISD::ZERO_EXTEND && Opc0 != ISD::SIGN_EXTEND &&
Opc0 != ISD::SIGN_EXTEND_INREG)) {
// fold (abs (sub nsw x, y)) -> abds(x, y)
- if (AbsOp1->getFlags().hasNoSignedWrap() && hasOperation(ISD::ABDS, VT) &&
+ if (AbsOp1->getFlags().hasNoSignedWrap() &&
+ (!LegalOperations || hasOperation(ISD::ABDS, VT)) &&
TLI.preferABDSToABSWithNSW(VT)) {
SDValue ABD = DAG.getNode(ISD::ABDS, DL, VT, Op0, Op1);
return DAG.getZExtOrTrunc(ABD, DL, SrcVT);
@@ -10964,7 +10965,8 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
// fold abs(zext(x) - zext(y)) -> zext(abdu(x, y))
EVT MaxVT = VT0.bitsGT(VT1) ? VT0 : VT1;
if ((VT0 == MaxVT || Op0->hasOneUse()) &&
- (VT1 == MaxVT || Op1->hasOneUse()) && hasOperation(ABDOpcode, MaxVT)) {
+ (VT1 == MaxVT || Op1->hasOneUse()) &&
+ (!LegalOperations || hasOperation(ABDOpcode, MaxVT))) {
SDValue ABD = DAG.getNode(ABDOpcode, DL, MaxVT,
DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op0),
DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op1));
@@ -10974,7 +10976,7 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
// fold abs(sext(x) - sext(y)) -> abds(sext(x), sext(y))
// fold abs(zext(x) - zext(y)) -> abdu(zext(x), zext(y))
- if (hasOperation(ABDOpcode, VT)) {
+ if (!LegalOperations || hasOperation(ABDOpcode, VT)) {
SDValue ABD = DAG.getNode(ABDOpcode, DL, VT, Op0, Op1);
return DAG.getZExtOrTrunc(ABD, DL, SrcVT);
}
@@ -12346,7 +12348,7 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
N1.getOperand(1) == N2.getOperand(0)) {
bool IsSigned = isSignedIntSetCC(CC);
unsigned ABDOpc = IsSigned ? ISD::ABDS : ISD::ABDU;
- if (hasOperation(ABDOpc, VT)) {
+ if (!LegalOperations || hasOperation(ABDOpc, VT)) {
switch (CC) {
case ISD::SETGT:
case ISD::SETGE:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 12f1d005249d6..d5f5f6d6eee61 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -188,6 +188,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_SUB:
case ISD::VP_MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break;
+ case ISD::ABDS:
case ISD::VP_SMIN:
case ISD::VP_SMAX:
case ISD::SDIV:
@@ -195,6 +196,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_SDIV:
case ISD::VP_SREM: Res = PromoteIntRes_SExtIntBinOp(N); break;
+ case ISD::ABDU:
case ISD::VP_UMIN:
case ISD::VP_UMAX:
case ISD::UDIV:
@@ -2703,6 +2705,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::PARITY: ExpandIntRes_PARITY(N, Lo, Hi); break;
case ISD::Constant: ExpandIntRes_Constant(N, Lo, Hi); break;
case ISD::ABS: ExpandIntRes_ABS(N, Lo, Hi); break;
+ case ISD::ABDS:
+ case ISD::ABDU: ExpandIntRes_ABD(N, Lo, Hi); break;
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: ExpandIntRes_CTLZ(N, Lo, Hi); break;
case ISD::CTPOP: ExpandIntRes_CTPOP(N, Lo, Hi); break;
@@ -3749,6 +3753,11 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
Hi = DAG.getConstant(0, dl, NVT);
}
+void DAGTypeLegalizer::ExpandIntRes_ABD(SDNode *N, SDValue &Lo, SDValue &Hi) {
+ SDValue Result = TLI.expandABD(N, DAG);
+ SplitInteger(Result, Lo, Hi);
+}
+
void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index bec9cb49b5864..081a07cc402cd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -443,6 +443,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Constant (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ABS (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_ABD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTLZ (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTPOP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTTZ (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 361416edb554c..32c962b9259bb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -136,6 +136,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
case ISD::FLDEXP:
+ case ISD::ABDS:
+ case ISD::ABDU:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
@@ -1171,6 +1173,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::MUL: case ISD::VP_MUL:
case ISD::MULHS:
case ISD::MULHU:
+ case ISD::ABDS:
+ case ISD::ABDU:
case ISD::FADD: case ISD::VP_FADD:
case ISD::FSUB: case ISD::VP_FSUB:
case ISD::FMUL: case ISD::VP_FMUL:
@@ -4235,6 +4239,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::MUL: case ISD::VP_MUL:
case ISD::MULHS:
case ISD::MULHU:
+ case ISD::ABDS:
+ case ISD::ABDU:
case ISD::OR: case ISD::VP_OR:
case ISD::SUB: case ISD::VP_SUB:
case ISD::XOR: case ISD::VP_XOR:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 414c724b94f7b..f7db5d8d937f2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6940,6 +6940,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(VT.isInteger() && "This operator does not apply to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
+ if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+ return getNode(ISD::XOR, DL, VT, N1, N2);
break;
case ISD::SMIN:
case ISD::UMAX:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f856c8a51984e..50cdbf6c5e804 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9226,6 +9226,15 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const {
DAG.getNode(ISD::USUBSAT, dl, VT, LHS, RHS),
DAG.getNode(ISD::USUBSAT, dl, VT, RHS, LHS));
+ // If the subtract doesn't overflow then just use abs(sub())
+ // NOTE: don't use frozen operands for value tracking.
+ if (DAG.willNotOverflowSub(IsSigned, N->getOperand(0), N->getOperand(1)))
+ return DAG.getNode(ISD::ABS, dl, VT,
+ DAG.getNode(ISD::SUB, dl, VT, LHS, RHS));
+ if (DAG.willNotOverflowSub(IsSigned, N->getOperand(1), N->getOperand(0)))
+ return DAG.getNode(ISD::ABS, dl, VT,
+ DAG.getNode(ISD::SUB, dl, VT, RHS, LHS));
+
EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
SDValue Cmp = DAG.getSetCC(dl, CCVT, LHS, RHS, CC);
@@ -9239,10 +9248,15 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SUB, dl, VT, Cmp, Xor);
}
+ // FIXME: Should really try to split the vector in case it's legal on a
+ // subvector.
+ if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return DAG.UnrollVectorOp(N);
+
// abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
// abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
- return DAG.getSelect(dl, VT, Cmp, DAG.getNode(ISD::SUB, dl, VT, LHS, RHS),
- DAG.getNode(ISD::SUB, dl, VT, RHS, LHS));
+ SDValue Diff = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
+ return DAG.getSelect(dl, VT, Cmp, Diff, DAG.getNegative(Diff, dl, VT));
}
SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const {
diff --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll
index 1cf99d1b31a8b..9b78934b580bc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll
@@ -65,7 +65,7 @@ define i32 @foo5(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: foo5:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: subs w8, w0, w1
-; CHECK-NEXT: cneg w0, w8, mi
+; CHECK-NEXT: cneg w0, w8, le
; CHECK-NEXT: ret
entry:
%sub = sub nsw i32 %a, %b
@@ -98,7 +98,7 @@ define i32 @foo7(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: foo7:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: subs w8, w0, w1
-; CHECK-NEXT: cneg w9, w8, mi
+; CHECK-NEXT: cneg w9, w8, le
; CHECK-NEXT: cmn w8, #1
; CHECK-NEXT: csel w10, w9, w0, lt
; CHECK-NEXT: cmp w8, #0
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 178c229d04e47..48afcc5c3dd2b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1799,30 +1799,13 @@ define <2 x i64> @uabd_i32(<2 x i32> %a, <2 x i32> %b) {
define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: uabd_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov.d x8, v0[1]
-; CHECK-NEXT: mov.d x9, v1[1]
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: fmov x11, d1
-; CHECK-NEXT: asr x12, x10, #63
-; CHECK-NEXT: asr x13, x11, #63
-; CHECK-NEXT: subs x10, x10, x11
-; CHECK-NEXT: asr x11, x8, #63
-; CHECK-NEXT: asr x14, x9, #63
-; CHECK-NEXT: sbc x12, x12, x13
-; CHECK-NEXT: subs x8, x8, x9
-; CHECK-NEXT: sbc x9, x11, x14
-; CHECK-NEXT: asr x13, x12, #63
-; CHECK-NEXT: asr x11, x9, #63
-; CHECK-NEXT: eor x10, x10, x13
-; CHECK-NEXT: eor x8, x8, x11
-; CHECK-NEXT: eor x9, x9, x11
-; CHECK-NEXT: subs x2, x8, x11
-; CHECK-NEXT: eor x8, x12, x13
-; CHECK-NEXT: sbc x3, x9, x11
-; CHECK-NEXT: subs x9, x10, x13
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: sbc x1, x8, x13
-; CHECK-NEXT: mov.d v0[1], x1
+; CHECK-NEXT: cmgt.2d v2, v0, v1
+; CHECK-NEXT: sub.2d v0, v0, v1
+; CHECK-NEXT: mov x1, xzr
+; CHECK-NEXT: mov x3, xzr
+; CHECK-NEXT: eor.16b v0, v0, v2
+; CHECK-NEXT: sub.2d v0, v2, v0
+; CHECK-NEXT: mov.d x2, v0[1]
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
%aext = sext <2 x i64> %a to <2 x i128>
diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll
index 901cb8adc23f0..fa3d6e4d1e556 100644
--- a/llvm/test/CodeGen/AArch64/neon-abd.ll
+++ b/llvm/test/CodeGen/AArch64/neon-abd.ll
@@ -49,11 +49,12 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) #0 {
define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-LABEL: sabd_4h_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: shl v1.4h, v1.4h, #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-NEXT: shl v0.4h, v0.4h, #8
; CHECK-NEXT: sshr v1.4h, v1.4h, #8
+; CHECK-NEXT: sshr v0.4h, v0.4h, #8
; CHECK-NEXT: sabd v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: bic v0.4h, #255, lsl #8
; CHECK-NEXT: ret
%a.sext = sext <4 x i8> %a to <4 x i16>
%b.sext = sext <4 x i8> %b to <4 x i16>
@@ -103,11 +104,13 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) #0 {
define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 {
; CHECK-LABEL: sabd_2s_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl v0.2s, v0.2s, #16
; CHECK-NEXT: shl v1.2s, v1.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
+; CHECK-NEXT: shl v0.2s, v0.2s, #16
+; CHECK-NEXT: movi d2, #0x00ffff0000ffff
; CHECK-NEXT: sshr v1.2s, v1.2s, #16
+; CHECK-NEXT: sshr v0.2s, v0.2s, #16
; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
; CHECK-NEXT: ret
%a.sext = sext <2 x i16> %a to <2 x i32>
%b.sext = sext <2 x i16> %b to <2 x i32>
@@ -144,27 +147,10 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) #0 {
define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: sabd_2d:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, v0.d[1]
-; CHECK-NEXT: mov x9, v1.d[1]
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: fmov x12, d1
-; CHECK-NEXT: asr x14, x10, #63
-; CHECK-NEXT: asr x11, x8, #63
-; CHECK-NEXT: asr x13, x9, #63
-; CHECK-NEXT: asr x15, x12, #63
-; CHECK-NEXT: subs x8, x8, x9
-; CHECK-NEXT: sbc x9, x11, x13
-; CHECK-NEXT: subs x10, x10, x12
-; CHECK-NEXT: sbc x11, x14, x15
-; CHECK-NEXT: asr x9, x9, #63
-; CHECK-NEXT: asr x11, x11, #63
-; CHECK-NEXT: eor x8, x8, x9
-; CHECK-NEXT: eor x10, x10, x11
-; CHECK-NEXT: sub x8, x8, x9
-; CHECK-NEXT: sub x10, x10, x11
-; CHECK-NEXT: fmov d1, x8
-; CHECK-NEXT: fmov d0, x10
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: cmgt v2.2d, v0.2d, v1.2d
+; CHECK-NEXT: sub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
; CHECK-NEXT: ret
%a.sext = sext <2 x i64> %a to <2 x i128>
%b.sext = sext <2 x i64> %b to <2 x i128>
@@ -232,8 +218,8 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) #0 {
define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-LABEL: uabd_4h_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
; CHECK-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-NEXT: bic v0.4h, #255, lsl #8
; CHECK-NEXT: uabd v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
%a.zext = zext <4 x i8> %a to <4 x i16>
@@ -285,8 +271,8 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 {
; CHECK-LABEL: uabd_2s_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
; CHECK-NEXT: uabd v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
%a.zext = zext <2 x i16> %a to <2 x i32>
@@ -324,23 +310,9 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) #0 {
define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: uabd_2d:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, v0.d[1]
-; CHECK-NEXT: mov x9, v1.d[1]
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: fmov x11, d1
-; CHECK-NEXT: subs x8, x8, x9
-; CHECK-NEXT: ngc x9, xzr
-; CHECK-NEXT: subs x10, x10, x11
-; CHECK-NEXT: ngc x11, xzr
-; CHECK-NEXT: asr x9, x9, #63
-; CHECK-NEXT: asr x11, x11, #63
-; CHECK-NEXT: eor x8, x8, x9
-; CHECK-NEXT: eor x10, x10, x11
-; CHECK-NEXT: sub x8, x8, x9
-; CHECK-NEXT: sub x10, x10, x11
-; CHECK-NEXT: fmov d1, x8
-; CHECK-NEXT: fmov d0, x10
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: uqsub v2.2d, v1.2d, v0.2d
+; CHECK-NEXT: uqsub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%a.zext = zext <2 x i64> %a to <2 x i128>
%b.zext = zext <2 x i64> %b to <2 x i128>
@@ -439,8 +411,10 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: sabd_v2i64_nsw:
; CHECK: // %bb.0:
+; CHECK-NEXT: cmgt v2.2d, v0.2d, v1.2d
; CHECK-NEXT: sub v0.2d, v0.2d, v1.2d
-; CHECK-NEXT: abs v0.2d, v0.2d
+; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b
+; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
; CHECK-NEXT: ret
%sub = sub nsw <2 x i64> %a, %b
%abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
@@ -484,9 +458,8 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
; CHECK-LABEL: smaxmin_v2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: cmgt v2.2d, v0.2d, v1.2d
-; CHECK-NEXT: cmgt v3.2d, v1.2d, v0.2d
-; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b
-; CHECK-NEXT: bif v0.16b, v1.16b, v3.16b
+; CHECK-NEXT: sub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b
; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
; CHECK-NEXT: ret
%a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
@@ -531,11 +504,9 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
; CHECK-LABEL: umaxmin_v2i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: cmhi v2.2d, v0.2d, v1.2d
-; CHECK-NEXT: cmhi v3.2d, v1.2d, v0.2d
-; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b
-; CHECK-NEXT: bif v0.16b, v1.16b, v3.16b
-; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
+; CHECK-NEXT: uqsub v2.2d, v1.2d, v0.2d
+; CHECK-NEXT: uqsub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
; CHECK-NEXT: ret
%a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
%b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
diff --git a/llvm/test/CodeGen/AArch64/sve-aba.ll b/llvm/test/CodeGen/AArch64/sve-aba.ll
index 6859f7d017044..aaa843b454ad3 100644
--- a/llvm/test/CodeGen/AArch64/sve-aba.ll
+++ b/llvm/test/CodeGen/AArch64/sve-aba.ll
@@ -24,9 +24,10 @@ define <vscale x 16 x i8> @saba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
define <vscale x 16 x i8> @saba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
; CHECK-LABEL: saba_b_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: saba z0.b, z1.b, z2.b
+; CHECK-NEXT: ptrue p2.b
+; CHECK-NEXT: mov z1.b, #1 // =0x1
+; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
%c.sext = sext <vscale x 16 x i1> %c to <vscale x 16 x i8>
@@ -75,9 +76,11 @@ define <vscale x 8 x i16> @saba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x
; CHECK-LABEL: saba_h_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
; CHECK-NEXT: sxtb z2.h, p0/m, z2.h
-; CHECK-NEXT: saba z0.h, z1.h, z2.h
+; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT: sabd z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT: and z1.h, z1.h, #0xff
+; CHECK-NEXT: add z0.h, z0.h, z1.h
; CHECK-NEXT: ret
%b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
%c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
@@ -126,9 +129,11 @@ define <vscale x 4 x i32> @saba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x
; CHECK-LABEL: saba_s_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: sxth z1.s, p0/m, z1.s
; CHECK-NEXT: sxth z2.s, p0/m, z2.s
-; CHECK-NEXT: saba z0.s, z1.s, z2.s
+; CHECK-NEXT: sxth z1.s, p0/m, z1.s
+; CHECK-NEXT: sabd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT: and z1.s, z1.s, #0xffff
+; CHECK-NEXT: add z0.s, z0.s, z1.s
; CHECK-NEXT: ret
%b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
@@ -177,9 +182,10 @@ define <vscale x 2 x i64> @saba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x
; CHECK-LABEL: saba_d_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z1.d, p0/m, z1.d
; CHECK-NEXT: sxtw z2.d, p0/m, z2.d
-; CHECK-NEXT: saba z0.d, z1.d, z2.d
+; CHECK-NEXT: sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT: sabd z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT: adr z0.d, [z0.d, z1.d, uxtw]
; CHECK-NEXT: ret
%b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
%c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
@@ -231,9 +237,10 @@ define <vscale x 16 x i8> @uaba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
define <vscale x 16 x i8> @uaba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
; CHECK-LABEL: uaba_b_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: mov z2.b, p1/z, #1 // =0x1
-; CHECK-NEXT: uaba z0.b, z1.b, z2.b
+; CHECK-NEXT: ptrue p2.b
+; CHECK-NEXT: mov z1.b, #1 // =0x1
+; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
%c.zext = zext <vscale x 16 x i1> %c to <vscale x 16 x i8>
@@ -281,8 +288,8 @@ define <vscale x 8 x i16> @uaba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b,
define <vscale x 8 x i16> @uaba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
; CHECK-LABEL: uaba_h_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: and z2.h, z2.h, #0xff
+; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: uaba z0.h, z1.h, z2.h
; CHECK-NEXT: ret
%b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
@@ -331,8 +338,8 @@ define <vscale x 4 x i32> @uaba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
define <vscale x 4 x i32> @uaba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
; CHECK-LABEL: uaba_s_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEXT: and z2.s, z2.s, #0xffff
+; CHECK-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEXT: uaba z0.s, z1.s, z2.s
; CHECK-NEXT: ret
%b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
@@ -381,8 +388,8 @@ define <vscale x 2 x i64> @uaba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
define <vscale x 2 x i64> @uaba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
; CHECK-LABEL: uaba_d_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
; CHECK-NEXT: and z2.d, z2.d, #0xffffffff
+; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
; CHECK-NEXT: uaba z0.d, z1.d, z2.d
; CHECK-NEXT: ret
%b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll
index 7b492229e3d23..cffb32309f2ee 100644
--- a/llvm/test/CodeGen/AArch64/sve-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-abd.ll
@@ -24,10 +24,9 @@ define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: sabd_b_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: mov z1.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ptrue p2.b
+; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
; CHECK-NEXT: ret
%a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -54,9 +53,10 @@ define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
; CHECK-LABEL: sabd_h_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: sxtb z0.h, p0/m, z0.h
; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT: sxtb z0.h, p0/m, z0.h
; CHECK-NEXT: sabd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: and z0.h, z0.h, #0xff
; CHECK-NEXT: ret
%a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
%b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
@@ -83,9 +83,10 @@ define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
; CHECK-LABEL: sabd_s_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: sxth z0.s, p0/m, z0.s
; CHECK-NEXT: sxth z1.s, p0/m, z1.s
+; CHECK-NEXT: sxth z0.s, p0/m, z0.s
; CHECK-NEXT: sabd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: and z0.s, z0.s, #0xffff
; CHECK-NEXT: ret
%a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
%b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
@@ -112,9 +113,10 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
; CHECK-LABEL: sabd_d_promoted_ops:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
; CHECK-NEXT: sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
; CHECK-NEXT: sabd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: ret
%a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
%b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
@@ -144,10 +146,9 @@ define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: uabd_b_promoted_ops:
; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p2.b
+; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
-; CHECK-NEXT: mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: uabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -173,8 +174,8 @@ define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
; CHECK-LABEL: uabd_h_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.h, z0.h, #0xff
; CHECK-NEXT: and z1.h, z1.h, #0xff
+; CHECK-NEXT: and z0.h, z0.h, #0xff
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: uabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
@@ -202,8 +203,8 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
; CHECK-LABEL: uabd_s_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.s, z0.s, #0xffff
; CHECK-NEXT: and z1.s, z1.s, #0xffff
+; CHECK-NEXT: and z0.s, z0.s, #0xffff
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: uabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
@@ -231,8 +232,8 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
; CHECK-LABEL: uabd_d_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: uabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
@@ -265,8 +266,8 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) #0 {
; CHECK-LABEL: uabd_non_matching_promoted_ops:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.s, z0.s, #0xff
; CHECK-NEXT: and z1.s, z1.s, #0xffff
+; CHECK-NEXT: and z0.s, z0.s, #0xff
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: uabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 0492c5663e666..137c458aa50eb 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -86,9 +86,9 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i
; GCN-NEXT: s_add_u32 s8, s8, s7
; GCN-NEXT: s_addc_u32 s9, s9, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_max_u32 s3, s0, s1
-; GCN-NEXT: s_min_u32 s0, s0, s1
-; GCN-NEXT: s_sub_i32 s0, s3, s0
+; GCN-NEXT: s_min_u32 s3, s0, s1
+; GCN-NEXT: s_max_u32 s0, s0, s1
+; GCN-NEXT: s_sub_i32 s0, s0, s3
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: s_add_i32 s0, s0, s2
diff --git a/llvm/test/CodeGen/ARM/iabs.ll b/llvm/test/CodeGen/ARM/iabs.ll
index fffa9555b2966..635b0f1c2dfbe 100644
--- a/llvm/test/CodeGen/ARM/iabs.ll
+++ b/llvm/test/CodeGen/ARM/iabs.ll
@@ -27,7 +27,7 @@ define i32 @test2(i32 %a, i32 %b) nounwind readnone ssp {
; CHECK-LABEL: test2:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: subs r0, r0, r1
-; CHECK-NEXT: rsbmi r0, r0, #0
+; CHECK-NEXT: rsble r0, r0, #0
; CHECK-NEXT: bx lr
entry:
%sub = sub nsw i32 %a, %b
diff --git a/llvm/test/CodeGen/ARM/neon_vabs.ll b/llvm/test/CodeGen/ARM/neon_vabs.ll
index 4064aae65f665..37d389441d217 100644
--- a/llvm/test/CodeGen/ARM/neon_vabs.ll
+++ b/llvm/test/CodeGen/ARM/neon_vabs.ll
@@ -149,7 +149,10 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r2, r3
; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vabdl.u16 q8, d17, d16
+; CHECK-NEXT: vmin.u16 d18, d17, d16
+; CHECK-NEXT: vmax.u16 d16, d17, d16
+; CHECK-NEXT: vsub.i16 d16, d16, d18
+; CHECK-NEXT: vmovl.u16 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
@@ -166,7 +169,10 @@ define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r2, r3
; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vabdl.u8 q8, d17, d16
+; CHECK-NEXT: vmin.u8 d18, d17, d16
+; CHECK-NEXT: vmax.u8 d16, d17, d16
+; CHECK-NEXT: vsub.i8 d16, d16, d18
+; CHECK-NEXT: vmovl.u8 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index 7a6640fea2d1e..a041c9898f9fd 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -72,37 +72,51 @@ entry:
define <2 x i64> @sub_absv_64(<2 x i64> %a, <2 x i64> %b) local_unnamed_addr {
; CHECK-PWR9-LABEL: sub_absv_64:
; CHECK-PWR9: # %bb.0: # %entry
-; CHECK-PWR9-NEXT: vsubudm v2, v2, v3
-; CHECK-PWR9-NEXT: vnegd v3, v2
+; CHECK-PWR9-NEXT: vminsd v4, v2, v3
; CHECK-PWR9-NEXT: vmaxsd v2, v2, v3
+; CHECK-PWR9-NEXT: vsubudm v2, v2, v4
; CHECK-PWR9-NEXT: blr
;
; CHECK-PWR8-LABEL: sub_absv_64:
; CHECK-PWR8: # %bb.0: # %entry
-; CHECK-PWR8-NEXT: vsubudm v2, v2, v3
-; CHECK-PWR8-NEXT: xxlxor v3, v3, v3
-; CHECK-PWR8-NEXT: vsubudm v3, v3, v2
+; CHECK-PWR8-NEXT: vminsd v4, v2, v3
; CHECK-PWR8-NEXT: vmaxsd v2, v2, v3
+; CHECK-PWR8-NEXT: vsubudm v2, v2, v4
; CHECK-PWR8-NEXT: blr
;
; CHECK-PWR7-LABEL: sub_absv_64:
; CHECK-PWR7: # %bb.0: # %entry
-; CHECK-PWR7-NEXT: addi r3, r1, -48
+; CHECK-PWR7-NEXT: addi r3, r1, -96
; CHECK-PWR7-NEXT: stxvd2x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, -32
+; CHECK-PWR7-NEXT: addi r3, r1, -80
; CHECK-PWR7-NEXT: stxvd2x v3, 0, r3
-; CHECK-PWR7-NEXT: ld r4, -40(r1)
-; CHECK-PWR7-NEXT: ld r5, -24(r1)
-; CHECK-PWR7-NEXT: ld r3, -48(r1)
-; CHECK-PWR7-NEXT: sub r4, r4, r5
-; CHECK-PWR7-NEXT: sradi r5, r4, 63
-; CHECK-PWR7-NEXT: xor r4, r4, r5
-; CHECK-PWR7-NEXT: sub r4, r4, r5
-; CHECK-PWR7-NEXT: ld r5, -32(r1)
+; CHECK-PWR7-NEXT: ld r3, -88(r1)
+; CHECK-PWR7-NEXT: ld r4, -72(r1)
+; CHECK-PWR7-NEXT: ld r6, -80(r1)
+; CHECK-PWR7-NEXT: sub r5, r3, r4
+; CHECK-PWR7-NEXT: cmpd r3, r4
+; CHECK-PWR7-NEXT: li r3, 0
+; CHECK-PWR7-NEXT: li r4, -1
+; CHECK-PWR7-NEXT: std r5, -56(r1)
+; CHECK-PWR7-NEXT: ld r5, -96(r1)
+; CHECK-PWR7-NEXT: sub r7, r5, r6
+; CHECK-PWR7-NEXT: std r7, -64(r1)
+; CHECK-PWR7-NEXT: iselgt r7, r4, r3
+; CHECK-PWR7-NEXT: cmpd r5, r6
+; CHECK-PWR7-NEXT: std r7, -40(r1)
+; CHECK-PWR7-NEXT: iselgt r3, r4, r3
+; CHECK-PWR7-NEXT: addi r4, r1, -64
+; CHECK-PWR7-NEXT: std r3, -48(r1)
+; CHECK-PWR7-NEXT: lxvw4x vs0, 0, r4
+; CHECK-PWR7-NEXT: addi r4, r1, -48
+; CHECK-PWR7-NEXT: lxvw4x vs1, 0, r4
+; CHECK-PWR7-NEXT: addi r4, r1, -32
+; CHECK-PWR7-NEXT: xxlxor vs0, vs0, vs1
+; CHECK-PWR7-NEXT: stxvw4x vs0, 0, r4
+; CHECK-PWR7-NEXT: ld r4, -24(r1)
+; CHECK-PWR7-NEXT: sub r4, r7, r4
; CHECK-PWR7-NEXT: std r4, -8(r1)
-; CHECK-PWR7-NEXT: sub r3, r3, r5
-; CHECK-PWR7-NEXT: sradi r4, r3, 63
-; CHECK-PWR7-NEXT: xor r3, r3, r4
+; CHECK-PWR7-NEXT: ld r4, -32(r1)
; CHECK-PWR7-NEXT: sub r3, r3, r4
; CHECK-PWR7-NEXT: std r3, -16(r1)
; CHECK-PWR7-NEXT: addi r3, r1, -16
@@ -127,10 +141,9 @@ define <4 x i32> @sub_absv_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr {
;
; CHECK-PWR78-LABEL: sub_absv_32:
; CHECK-PWR78: # %bb.0: # %entry
-; CHECK-PWR78-NEXT: vsubuwm v2, v2, v3
-; CHECK-PWR78-NEXT: xxlxor v3, v3, v3
-; CHECK-PWR78-NEXT: vsubuwm v3, v3, v2
+; CHECK-PWR78-NEXT: vminsw v4, v2, v3
; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
entry:
%0 = sub nsw <4 x i32> %a, %b
@@ -143,10 +156,9 @@ entry:
define <8 x i16> @sub_absv_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
; CHECK-LABEL: sub_absv_16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsubuhm v2, v2, v3
-; CHECK-NEXT: xxlxor v3, v3, v3
-; CHECK-NEXT: vsubuhm v3, v3, v2
+; CHECK-NEXT: vminsh v4, v2, v3
; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
; CHECK-NEXT: blr
entry:
%0 = sub nsw <8 x i16> %a, %b
@@ -159,10 +171,9 @@ entry:
define <16 x i8> @sub_absv_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
; CHECK-LABEL: sub_absv_8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsububm v2, v2, v3
-; CHECK-NEXT: xxlxor v3, v3, v3
-; CHECK-NEXT: vsububm v3, v3, v2
+; CHECK-NEXT: vminsb v4, v2, v3
; CHECK-NEXT: vmaxsb v2, v2, v3
+; CHECK-NEXT: vsububm v2, v2, v4
; CHECK-NEXT: blr
entry:
%0 = sub nsw <16 x i8> %a, %b
@@ -178,76 +189,12 @@ entry:
; Therefore, we end up doing more work than is required with a pair of abs for word
; instead of just one for the halfword.
define <8 x i16> @sub_absv_16_ext(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
-; CHECK-PWR9-LABEL: sub_absv_16_ext:
-; CHECK-PWR9: # %bb.0: # %entry
-; CHECK-PWR9-NEXT: vmrghh v4, v2, v2
-; CHECK-PWR9-NEXT: vmrglh v2, v2, v2
-; CHECK-PWR9-NEXT: vmrghh v5, v3, v3
-; CHECK-PWR9-NEXT: vmrglh v3, v3, v3
-; CHECK-PWR9-NEXT: vextsh2w v2, v2
-; CHECK-PWR9-NEXT: vextsh2w v3, v3
-; CHECK-PWR9-NEXT: vextsh2w v4, v4
-; CHECK-PWR9-NEXT: vextsh2w v5, v5
-; CHECK-PWR9-NEXT: xvnegsp v3, v3
-; CHECK-PWR9-NEXT: xvnegsp v2, v2
-; CHECK-PWR9-NEXT: xvnegsp v4, v4
-; CHECK-PWR9-NEXT: vabsduw v2, v2, v3
-; CHECK-PWR9-NEXT: xvnegsp v3, v5
-; CHECK-PWR9-NEXT: vabsduw v3, v4, v3
-; CHECK-PWR9-NEXT: vpkuwum v2, v3, v2
-; CHECK-PWR9-NEXT: blr
-;
-; CHECK-PWR8-LABEL: sub_absv_16_ext:
-; CHECK-PWR8: # %bb.0: # %entry
-; CHECK-PWR8-NEXT: vspltisw v4, 8
-; CHECK-PWR8-NEXT: vmrglh v5, v2, v2
-; CHECK-PWR8-NEXT: vadduwm v4, v4, v4
-; CHECK-PWR8-NEXT: vmrghh v2, v2, v2
-; CHECK-PWR8-NEXT: vmrglh v0, v3, v3
-; CHECK-PWR8-NEXT: vmrghh v3, v3, v3
-; CHECK-PWR8-NEXT: vslw v5, v5, v4
-; CHECK-PWR8-NEXT: vslw v2, v2, v4
-; CHECK-PWR8-NEXT: vslw v0, v0, v4
-; CHECK-PWR8-NEXT: vslw v3, v3, v4
-; CHECK-PWR8-NEXT: vsraw v5, v5, v4
-; CHECK-PWR8-NEXT: vsraw v2, v2, v4
-; CHECK-PWR8-NEXT: vsraw v0, v0, v4
-; CHECK-PWR8-NEXT: vsraw v3, v3, v4
-; CHECK-PWR8-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR8-NEXT: vsubuwm v2, v2, v3
-; CHECK-PWR8-NEXT: vsubuwm v3, v5, v0
-; CHECK-PWR8-NEXT: vsubuwm v5, v4, v3
-; CHECK-PWR8-NEXT: vsubuwm v4, v4, v2
-; CHECK-PWR8-NEXT: vmaxsw v3, v3, v5
-; CHECK-PWR8-NEXT: vmaxsw v2, v2, v4
-; CHECK-PWR8-NEXT: vpkuwum v2, v2, v3
-; CHECK-PWR8-NEXT: blr
-;
-; CHECK-PWR7-LABEL: sub_absv_16_ext:
-; CHECK-PWR7: # %bb.0: # %entry
-; CHECK-PWR7-NEXT: vspltisw v4, 8
-; CHECK-PWR7-NEXT: vmrglh v5, v2, v2
-; CHECK-PWR7-NEXT: vmrghh v2, v2, v2
-; CHECK-PWR7-NEXT: vmrglh v0, v3, v3
-; CHECK-PWR7-NEXT: vmrghh v3, v3, v3
-; CHECK-PWR7-NEXT: vadduwm v4, v4, v4
-; CHECK-PWR7-NEXT: vslw v5, v5, v4
-; CHECK-PWR7-NEXT: vslw v2, v2, v4
-; CHECK-PWR7-NEXT: vslw v0, v0, v4
-; CHECK-PWR7-NEXT: vslw v3, v3, v4
-; CHECK-PWR7-NEXT: vsraw v5, v5, v4
-; CHECK-PWR7-NEXT: vsraw v2, v2, v4
-; CHECK-PWR7-NEXT: vsraw v0, v0, v4
-; CHECK-PWR7-NEXT: vsraw v3, v3, v4
-; CHECK-PWR7-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR7-NEXT: vsubuwm v2, v2, v3
-; CHECK-PWR7-NEXT: vsubuwm v3, v5, v0
-; CHECK-PWR7-NEXT: vsubuwm v5, v4, v3
-; CHECK-PWR7-NEXT: vsubuwm v4, v4, v2
-; CHECK-PWR7-NEXT: vmaxsw v3, v3, v5
-; CHECK-PWR7-NEXT: vmaxsw v2, v2, v4
-; CHECK-PWR7-NEXT: vpkuwum v2, v2, v3
-; CHECK-PWR7-NEXT: blr
+; CHECK-LABEL: sub_absv_16_ext:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vminsh v4, v2, v3
+; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
+; CHECK-NEXT: blr
entry:
%0 = sext <8 x i16> %a to <8 x i32>
%1 = sext <8 x i16> %b to <8 x i32>
@@ -266,13 +213,13 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR9-LE-LABEL: sub_absv_8_ext:
; CHECK-PWR9-LE: # %bb.0: # %entry
; CHECK-PWR9-LE-NEXT: li r3, 0
-; CHECK-PWR9-LE-NEXT: li r5, 2
; CHECK-PWR9-LE-NEXT: li r4, 1
+; CHECK-PWR9-LE-NEXT: li r5, 2
; CHECK-PWR9-LE-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-PWR9-LE-NEXT: vextubrx r6, r3, v2
; CHECK-PWR9-LE-NEXT: vextubrx r3, r3, v3
-; CHECK-PWR9-LE-NEXT: vextubrx r8, r5, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r5, r5, v3
+; CHECK-PWR9-LE-NEXT: vextubrx r7, r4, v2
+; CHECK-PWR9-LE-NEXT: vextubrx r4, r4, v3
; CHECK-PWR9-LE-NEXT: std r29, -24(r1) # 8-byte Folded Spill
; CHECK-PWR9-LE-NEXT: std r28, -32(r1) # 8-byte Folded Spill
; CHECK-PWR9-LE-NEXT: std r27, -40(r1) # 8-byte Folded Spill
@@ -280,155 +227,155 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR9-LE-NEXT: std r25, -56(r1) # 8-byte Folded Spill
; CHECK-PWR9-LE-NEXT: clrlwi r6, r6, 24
; CHECK-PWR9-LE-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR9-LE-NEXT: vextubrx r7, r4, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r4, r4, v3
-; CHECK-PWR9-LE-NEXT: sub r3, r6, r3
-; CHECK-PWR9-LE-NEXT: sub r6, r8, r5
; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
; CHECK-PWR9-LE-NEXT: clrlwi r4, r4, 24
-; CHECK-PWR9-LE-NEXT: sub r4, r7, r4
-; CHECK-PWR9-LE-NEXT: srawi r5, r3, 31
-; CHECK-PWR9-LE-NEXT: srawi r7, r4, 31
-; CHECK-PWR9-LE-NEXT: xor r3, r3, r5
-; CHECK-PWR9-LE-NEXT: xor r4, r4, r7
-; CHECK-PWR9-LE-NEXT: sub r5, r3, r5
-; CHECK-PWR9-LE-NEXT: srawi r3, r6, 31
-; CHECK-PWR9-LE-NEXT: sub r4, r4, r7
-; CHECK-PWR9-LE-NEXT: xor r6, r6, r3
+; CHECK-PWR9-LE-NEXT: vextubrx r8, r5, v2
+; CHECK-PWR9-LE-NEXT: vextubrx r5, r5, v3
+; CHECK-PWR9-LE-NEXT: cmplw r6, r3
; CHECK-PWR9-LE-NEXT: sub r3, r6, r3
+; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
+; CHECK-PWR9-LE-NEXT: clrlwi r5, r5, 24
+; CHECK-PWR9-LE-NEXT: neg r6, r3
+; CHECK-PWR9-LE-NEXT: iselgt r3, r3, r6
+; CHECK-PWR9-LE-NEXT: cmplw r7, r4
+; CHECK-PWR9-LE-NEXT: sub r4, r7, r4
+; CHECK-PWR9-LE-NEXT: neg r6, r4
+; CHECK-PWR9-LE-NEXT: iselgt r4, r4, r6
+; CHECK-PWR9-LE-NEXT: cmplw r8, r5
+; CHECK-PWR9-LE-NEXT: sub r5, r8, r5
+; CHECK-PWR9-LE-NEXT: neg r6, r5
+; CHECK-PWR9-LE-NEXT: iselgt r5, r5, r6
; CHECK-PWR9-LE-NEXT: li r6, 3
; CHECK-PWR9-LE-NEXT: vextubrx r7, r6, v2
; CHECK-PWR9-LE-NEXT: vextubrx r6, r6, v3
; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
; CHECK-PWR9-LE-NEXT: clrlwi r6, r6, 24
+; CHECK-PWR9-LE-NEXT: cmplw r7, r6
; CHECK-PWR9-LE-NEXT: sub r6, r7, r6
-; CHECK-PWR9-LE-NEXT: srawi r7, r6, 31
-; CHECK-PWR9-LE-NEXT: xor r6, r6, r7
-; CHECK-PWR9-LE-NEXT: sub r6, r6, r7
+; CHECK-PWR9-LE-NEXT: neg r7, r6
+; CHECK-PWR9-LE-NEXT: iselgt r6, r6, r7
; CHECK-PWR9-LE-NEXT: li r7, 4
; CHECK-PWR9-LE-NEXT: vextubrx r8, r7, v2
; CHECK-PWR9-LE-NEXT: vextubrx r7, r7, v3
; CHECK-PWR9-LE-NEXT: mtvsrd v4, r6
; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
+; CHECK-PWR9-LE-NEXT: cmplw r8, r7
; CHECK-PWR9-LE-NEXT: sub r7, r8, r7
-; CHECK-PWR9-LE-NEXT: srawi r8, r7, 31
-; CHECK-PWR9-LE-NEXT: xor r7, r7, r8
-; CHECK-PWR9-LE-NEXT: sub r7, r7, r8
+; CHECK-PWR9-LE-NEXT: neg r8, r7
+; CHECK-PWR9-LE-NEXT: iselgt r7, r7, r8
; CHECK-PWR9-LE-NEXT: li r8, 5
; CHECK-PWR9-LE-NEXT: vextubrx r9, r8, v2
; CHECK-PWR9-LE-NEXT: vextubrx r8, r8, v3
; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24
; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
+; CHECK-PWR9-LE-NEXT: cmplw r9, r8
; CHECK-PWR9-LE-NEXT: sub r8, r9, r8
-; CHECK-PWR9-LE-NEXT: srawi r9, r8, 31
-; CHECK-PWR9-LE-NEXT: xor r8, r8, r9
-; CHECK-PWR9-LE-NEXT: sub r8, r8, r9
+; CHECK-PWR9-LE-NEXT: neg r9, r8
+; CHECK-PWR9-LE-NEXT: iselgt r8, r8, r9
; CHECK-PWR9-LE-NEXT: li r9, 6
; CHECK-PWR9-LE-NEXT: vextubrx r10, r9, v2
; CHECK-PWR9-LE-NEXT: vextubrx r9, r9, v3
; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24
; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24
+; CHECK-PWR9-LE-NEXT: cmplw r10, r9
; CHECK-PWR9-LE-NEXT: sub r9, r10, r9
-; CHECK-PWR9-LE-NEXT: srawi r10, r9, 31
-; CHECK-PWR9-LE-NEXT: xor r9, r9, r10
-; CHECK-PWR9-LE-NEXT: sub r9, r9, r10
+; CHECK-PWR9-LE-NEXT: neg r10, r9
+; CHECK-PWR9-LE-NEXT: iselgt r9, r9, r10
; CHECK-PWR9-LE-NEXT: li r10, 7
; CHECK-PWR9-LE-NEXT: vextubrx r11, r10, v2
; CHECK-PWR9-LE-NEXT: vextubrx r10, r10, v3
; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24
; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24
+; CHECK-PWR9-LE-NEXT: cmplw r11, r10
; CHECK-PWR9-LE-NEXT: sub r10, r11, r10
-; CHECK-PWR9-LE-NEXT: srawi r11, r10, 31
-; CHECK-PWR9-LE-NEXT: xor r10, r10, r11
-; CHECK-PWR9-LE-NEXT: sub r10, r10, r11
+; CHECK-PWR9-LE-NEXT: neg r11, r10
+; CHECK-PWR9-LE-NEXT: iselgt r10, r10, r11
; CHECK-PWR9-LE-NEXT: li r11, 8
; CHECK-PWR9-LE-NEXT: vextubrx r12, r11, v2
; CHECK-PWR9-LE-NEXT: vextubrx r11, r11, v3
; CHECK-PWR9-LE-NEXT: mtvsrd v5, r10
; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24
; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24
+; CHECK-PWR9-LE-NEXT: cmplw r12, r11
; CHECK-PWR9-LE-NEXT: sub r11, r12, r11
-; CHECK-PWR9-LE-NEXT: srawi r12, r11, 31
-; CHECK-PWR9-LE-NEXT: xor r11, r11, r12
-; CHECK-PWR9-LE-NEXT: sub r11, r11, r12
+; CHECK-PWR9-LE-NEXT: neg r12, r11
+; CHECK-PWR9-LE-NEXT: iselgt r11, r11, r12
; CHECK-PWR9-LE-NEXT: li r12, 9
; CHECK-PWR9-LE-NEXT: vextubrx r0, r12, v2
; CHECK-PWR9-LE-NEXT: vextubrx r12, r12, v3
; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24
; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24
+; CHECK-PWR9-LE-NEXT: cmplw r0, r12
; CHECK-PWR9-LE-NEXT: sub r12, r0, r12
-; CHECK-PWR9-LE-NEXT: srawi r0, r12, 31
-; CHECK-PWR9-LE-NEXT: xor r12, r12, r0
-; CHECK-PWR9-LE-NEXT: sub r12, r12, r0
; CHECK-PWR9-LE-NEXT: li r0, 10
+; CHECK-PWR9-LE-NEXT: neg r30, r12
+; CHECK-PWR9-LE-NEXT: iselgt r12, r12, r30
; CHECK-PWR9-LE-NEXT: vextubrx r30, r0, v2
; CHECK-PWR9-LE-NEXT: vextubrx r0, r0, v3
; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24
; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-LE-NEXT: sub r0, r30, r0
-; CHECK-PWR9-LE-NEXT: srawi r30, r0, 31
-; CHECK-PWR9-LE-NEXT: xor r0, r0, r30
-; CHECK-PWR9-LE-NEXT: sub r0, r0, r30
+; CHECK-PWR9-LE-NEXT: cmplw r30, r0
+; CHECK-PWR9-LE-NEXT: sub r30, r30, r0
+; CHECK-PWR9-LE-NEXT: neg r29, r30
+; CHECK-PWR9-LE-NEXT: iselgt r0, r30, r29
; CHECK-PWR9-LE-NEXT: li r30, 11
; CHECK-PWR9-LE-NEXT: vextubrx r29, r30, v2
; CHECK-PWR9-LE-NEXT: vextubrx r30, r30, v3
; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24
; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24
+; CHECK-PWR9-LE-NEXT: cmplw r29, r30
; CHECK-PWR9-LE-NEXT: sub r30, r29, r30
-; CHECK-PWR9-LE-NEXT: srawi r29, r30, 31
-; CHECK-PWR9-LE-NEXT: xor r30, r30, r29
-; CHECK-PWR9-LE-NEXT: sub r30, r30, r29
+; CHECK-PWR9-LE-NEXT: neg r29, r30
+; CHECK-PWR9-LE-NEXT: iselgt r30, r30, r29
; CHECK-PWR9-LE-NEXT: li r29, 12
; CHECK-PWR9-LE-NEXT: vextubrx r28, r29, v2
; CHECK-PWR9-LE-NEXT: vextubrx r29, r29, v3
; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24
; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24
+; CHECK-PWR9-LE-NEXT: cmplw r28, r29
; CHECK-PWR9-LE-NEXT: sub r29, r28, r29
-; CHECK-PWR9-LE-NEXT: srawi r28, r29, 31
-; CHECK-PWR9-LE-NEXT: xor r29, r29, r28
-; CHECK-PWR9-LE-NEXT: sub r29, r29, r28
+; CHECK-PWR9-LE-NEXT: neg r28, r29
+; CHECK-PWR9-LE-NEXT: iselgt r29, r29, r28
; CHECK-PWR9-LE-NEXT: li r28, 13
; CHECK-PWR9-LE-NEXT: vextubrx r27, r28, v2
; CHECK-PWR9-LE-NEXT: vextubrx r28, r28, v3
; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24
; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24
+; CHECK-PWR9-LE-NEXT: cmplw r27, r28
; CHECK-PWR9-LE-NEXT: sub r28, r27, r28
-; CHECK-PWR9-LE-NEXT: srawi r27, r28, 31
-; CHECK-PWR9-LE-NEXT: xor r28, r28, r27
-; CHECK-PWR9-LE-NEXT: sub r28, r28, r27
+; CHECK-PWR9-LE-NEXT: neg r27, r28
+; CHECK-PWR9-LE-NEXT: iselgt r28, r28, r27
; CHECK-PWR9-LE-NEXT: li r27, 14
; CHECK-PWR9-LE-NEXT: vextubrx r26, r27, v2
; CHECK-PWR9-LE-NEXT: vextubrx r27, r27, v3
; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24
; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24
+; CHECK-PWR9-LE-NEXT: cmplw r26, r27
; CHECK-PWR9-LE-NEXT: sub r27, r26, r27
-; CHECK-PWR9-LE-NEXT: srawi r26, r27, 31
-; CHECK-PWR9-LE-NEXT: xor r27, r27, r26
-; CHECK-PWR9-LE-NEXT: sub r27, r27, r26
+; CHECK-PWR9-LE-NEXT: neg r26, r27
+; CHECK-PWR9-LE-NEXT: iselgt r27, r27, r26
; CHECK-PWR9-LE-NEXT: li r26, 15
; CHECK-PWR9-LE-NEXT: vextubrx r25, r26, v2
; CHECK-PWR9-LE-NEXT: vextubrx r26, r26, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v2, r5
+; CHECK-PWR9-LE-NEXT: mtvsrd v2, r3
; CHECK-PWR9-LE-NEXT: mtvsrd v3, r4
; CHECK-PWR9-LE-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r3
+; CHECK-PWR9-LE-NEXT: mtvsrd v3, r5
; CHECK-PWR9-LE-NEXT: clrlwi r25, r25, 24
; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24
; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
; CHECK-PWR9-LE-NEXT: mtvsrd v4, r8
+; CHECK-PWR9-LE-NEXT: cmplw r25, r26
; CHECK-PWR9-LE-NEXT: sub r26, r25, r26
; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2
; CHECK-PWR9-LE-NEXT: mtvsrd v3, r7
-; CHECK-PWR9-LE-NEXT: srawi r25, r26, 31
+; CHECK-PWR9-LE-NEXT: neg r25, r26
; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
; CHECK-PWR9-LE-NEXT: mtvsrd v4, r9
-; CHECK-PWR9-LE-NEXT: xor r26, r26, r25
-; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR9-LE-NEXT: sub r26, r26, r25
+; CHECK-PWR9-LE-NEXT: iselgt r26, r26, r25
; CHECK-PWR9-LE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4
; CHECK-PWR9-LE-NEXT: mtvsrd v5, r26
; CHECK-PWR9-LE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3
@@ -475,130 +422,130 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR9-BE-NEXT: clrlwi r4, r4, 24
; CHECK-PWR9-BE-NEXT: vextublx r8, r5, v2
; CHECK-PWR9-BE-NEXT: vextublx r5, r5, v3
+; CHECK-PWR9-BE-NEXT: cmplw r6, r3
; CHECK-PWR9-BE-NEXT: sub r3, r6, r3
-; CHECK-PWR9-BE-NEXT: sub r4, r7, r4
; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
; CHECK-PWR9-BE-NEXT: clrlwi r5, r5, 24
+; CHECK-PWR9-BE-NEXT: neg r6, r3
+; CHECK-PWR9-BE-NEXT: iselgt r3, r3, r6
+; CHECK-PWR9-BE-NEXT: cmplw r7, r4
+; CHECK-PWR9-BE-NEXT: sub r4, r7, r4
+; CHECK-PWR9-BE-NEXT: neg r6, r4
+; CHECK-PWR9-BE-NEXT: iselgt r4, r4, r6
+; CHECK-PWR9-BE-NEXT: cmplw r8, r5
; CHECK-PWR9-BE-NEXT: sub r5, r8, r5
-; CHECK-PWR9-BE-NEXT: srawi r6, r3, 31
-; CHECK-PWR9-BE-NEXT: srawi r7, r4, 31
-; CHECK-PWR9-BE-NEXT: srawi r8, r5, 31
-; CHECK-PWR9-BE-NEXT: xor r3, r3, r6
-; CHECK-PWR9-BE-NEXT: xor r4, r4, r7
-; CHECK-PWR9-BE-NEXT: xor r5, r5, r8
-; CHECK-PWR9-BE-NEXT: sub r3, r3, r6
+; CHECK-PWR9-BE-NEXT: neg r6, r5
+; CHECK-PWR9-BE-NEXT: iselgt r5, r5, r6
; CHECK-PWR9-BE-NEXT: li r6, 3
-; CHECK-PWR9-BE-NEXT: sub r4, r4, r7
-; CHECK-PWR9-BE-NEXT: sub r5, r5, r8
; CHECK-PWR9-BE-NEXT: vextublx r7, r6, v2
; CHECK-PWR9-BE-NEXT: vextublx r6, r6, v3
; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24
; CHECK-PWR9-BE-NEXT: clrlwi r6, r6, 24
+; CHECK-PWR9-BE-NEXT: cmplw r7, r6
; CHECK-PWR9-BE-NEXT: sub r6, r7, r6
-; CHECK-PWR9-BE-NEXT: srawi r7, r6, 31
-; CHECK-PWR9-BE-NEXT: xor r6, r6, r7
-; CHECK-PWR9-BE-NEXT: sub r6, r6, r7
+; CHECK-PWR9-BE-NEXT: neg r7, r6
+; CHECK-PWR9-BE-NEXT: iselgt r6, r6, r7
; CHECK-PWR9-BE-NEXT: li r7, 4
; CHECK-PWR9-BE-NEXT: vextublx r8, r7, v2
; CHECK-PWR9-BE-NEXT: vextublx r7, r7, v3
; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24
+; CHECK-PWR9-BE-NEXT: cmplw r8, r7
; CHECK-PWR9-BE-NEXT: sub r7, r8, r7
-; CHECK-PWR9-BE-NEXT: srawi r8, r7, 31
-; CHECK-PWR9-BE-NEXT: xor r7, r7, r8
-; CHECK-PWR9-BE-NEXT: sub r7, r7, r8
+; CHECK-PWR9-BE-NEXT: neg r8, r7
+; CHECK-PWR9-BE-NEXT: iselgt r7, r7, r8
; CHECK-PWR9-BE-NEXT: li r8, 5
; CHECK-PWR9-BE-NEXT: vextublx r9, r8, v2
; CHECK-PWR9-BE-NEXT: vextublx r8, r8, v3
; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24
; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
+; CHECK-PWR9-BE-NEXT: cmplw r9, r8
; CHECK-PWR9-BE-NEXT: sub r8, r9, r8
-; CHECK-PWR9-BE-NEXT: srawi r9, r8, 31
-; CHECK-PWR9-BE-NEXT: xor r8, r8, r9
-; CHECK-PWR9-BE-NEXT: sub r8, r8, r9
+; CHECK-PWR9-BE-NEXT: neg r9, r8
+; CHECK-PWR9-BE-NEXT: iselgt r8, r8, r9
; CHECK-PWR9-BE-NEXT: li r9, 6
; CHECK-PWR9-BE-NEXT: vextublx r10, r9, v2
; CHECK-PWR9-BE-NEXT: vextublx r9, r9, v3
; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24
; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24
+; CHECK-PWR9-BE-NEXT: cmplw r10, r9
; CHECK-PWR9-BE-NEXT: sub r9, r10, r9
-; CHECK-PWR9-BE-NEXT: srawi r10, r9, 31
-; CHECK-PWR9-BE-NEXT: xor r9, r9, r10
-; CHECK-PWR9-BE-NEXT: sub r9, r9, r10
+; CHECK-PWR9-BE-NEXT: neg r10, r9
+; CHECK-PWR9-BE-NEXT: iselgt r9, r9, r10
; CHECK-PWR9-BE-NEXT: li r10, 7
; CHECK-PWR9-BE-NEXT: vextublx r11, r10, v2
; CHECK-PWR9-BE-NEXT: vextublx r10, r10, v3
; CHECK-PWR9-BE-NEXT: mtfprwz f2, r9
; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24
; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24
+; CHECK-PWR9-BE-NEXT: cmplw r11, r10
; CHECK-PWR9-BE-NEXT: sub r10, r11, r10
-; CHECK-PWR9-BE-NEXT: srawi r11, r10, 31
-; CHECK-PWR9-BE-NEXT: xor r10, r10, r11
-; CHECK-PWR9-BE-NEXT: sub r10, r10, r11
+; CHECK-PWR9-BE-NEXT: neg r11, r10
+; CHECK-PWR9-BE-NEXT: iselgt r10, r10, r11
; CHECK-PWR9-BE-NEXT: li r11, 8
; CHECK-PWR9-BE-NEXT: vextublx r12, r11, v2
; CHECK-PWR9-BE-NEXT: vextublx r11, r11, v3
; CHECK-PWR9-BE-NEXT: clrlwi r12, r12, 24
; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24
+; CHECK-PWR9-BE-NEXT: cmplw r12, r11
; CHECK-PWR9-BE-NEXT: sub r11, r12, r11
-; CHECK-PWR9-BE-NEXT: srawi r12, r11, 31
-; CHECK-PWR9-BE-NEXT: xor r11, r11, r12
-; CHECK-PWR9-BE-NEXT: sub r11, r11, r12
+; CHECK-PWR9-BE-NEXT: neg r12, r11
+; CHECK-PWR9-BE-NEXT: iselgt r11, r11, r12
; CHECK-PWR9-BE-NEXT: li r12, 9
; CHECK-PWR9-BE-NEXT: vextublx r0, r12, v2
; CHECK-PWR9-BE-NEXT: vextublx r12, r12, v3
; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24
; CHECK-PWR9-BE-NEXT: clrlwi r12, r12, 24
+; CHECK-PWR9-BE-NEXT: cmplw r0, r12
; CHECK-PWR9-BE-NEXT: sub r12, r0, r12
-; CHECK-PWR9-BE-NEXT: srawi r0, r12, 31
-; CHECK-PWR9-BE-NEXT: xor r12, r12, r0
-; CHECK-PWR9-BE-NEXT: sub r12, r12, r0
; CHECK-PWR9-BE-NEXT: li r0, 10
+; CHECK-PWR9-BE-NEXT: neg r30, r12
+; CHECK-PWR9-BE-NEXT: iselgt r12, r12, r30
; CHECK-PWR9-BE-NEXT: vextublx r30, r0, v2
; CHECK-PWR9-BE-NEXT: vextublx r0, r0, v3
; CHECK-PWR9-BE-NEXT: mtvsrwz v4, r12
; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24
; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-BE-NEXT: sub r0, r30, r0
-; CHECK-PWR9-BE-NEXT: srawi r30, r0, 31
-; CHECK-PWR9-BE-NEXT: xor r0, r0, r30
-; CHECK-PWR9-BE-NEXT: sub r0, r0, r30
+; CHECK-PWR9-BE-NEXT: cmplw r30, r0
+; CHECK-PWR9-BE-NEXT: sub r30, r30, r0
+; CHECK-PWR9-BE-NEXT: neg r29, r30
+; CHECK-PWR9-BE-NEXT: iselgt r0, r30, r29
; CHECK-PWR9-BE-NEXT: li r30, 11
; CHECK-PWR9-BE-NEXT: vextublx r29, r30, v2
; CHECK-PWR9-BE-NEXT: vextublx r30, r30, v3
; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24
; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24
+; CHECK-PWR9-BE-NEXT: cmplw r29, r30
; CHECK-PWR9-BE-NEXT: sub r30, r29, r30
-; CHECK-PWR9-BE-NEXT: srawi r29, r30, 31
-; CHECK-PWR9-BE-NEXT: xor r30, r30, r29
-; CHECK-PWR9-BE-NEXT: sub r30, r30, r29
+; CHECK-PWR9-BE-NEXT: neg r29, r30
+; CHECK-PWR9-BE-NEXT: iselgt r30, r30, r29
; CHECK-PWR9-BE-NEXT: li r29, 12
; CHECK-PWR9-BE-NEXT: vextublx r28, r29, v2
; CHECK-PWR9-BE-NEXT: vextublx r29, r29, v3
; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24
; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24
+; CHECK-PWR9-BE-NEXT: cmplw r28, r29
; CHECK-PWR9-BE-NEXT: sub r29, r28, r29
-; CHECK-PWR9-BE-NEXT: srawi r28, r29, 31
-; CHECK-PWR9-BE-NEXT: xor r29, r29, r28
-; CHECK-PWR9-BE-NEXT: sub r29, r29, r28
+; CHECK-PWR9-BE-NEXT: neg r28, r29
+; CHECK-PWR9-BE-NEXT: iselgt r29, r29, r28
; CHECK-PWR9-BE-NEXT: li r28, 13
; CHECK-PWR9-BE-NEXT: vextublx r27, r28, v2
; CHECK-PWR9-BE-NEXT: vextublx r28, r28, v3
; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24
; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24
+; CHECK-PWR9-BE-NEXT: cmplw r27, r28
; CHECK-PWR9-BE-NEXT: sub r28, r27, r28
-; CHECK-PWR9-BE-NEXT: srawi r27, r28, 31
-; CHECK-PWR9-BE-NEXT: xor r28, r28, r27
-; CHECK-PWR9-BE-NEXT: sub r28, r28, r27
+; CHECK-PWR9-BE-NEXT: neg r27, r28
+; CHECK-PWR9-BE-NEXT: iselgt r28, r28, r27
; CHECK-PWR9-BE-NEXT: li r27, 14
; CHECK-PWR9-BE-NEXT: vextublx r26, r27, v2
; CHECK-PWR9-BE-NEXT: vextublx r27, r27, v3
; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24
; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24
+; CHECK-PWR9-BE-NEXT: cmplw r26, r27
; CHECK-PWR9-BE-NEXT: sub r27, r26, r27
-; CHECK-PWR9-BE-NEXT: srawi r26, r27, 31
-; CHECK-PWR9-BE-NEXT: xor r27, r27, r26
-; CHECK-PWR9-BE-NEXT: sub r27, r27, r26
+; CHECK-PWR9-BE-NEXT: neg r26, r27
+; CHECK-PWR9-BE-NEXT: iselgt r27, r27, r26
; CHECK-PWR9-BE-NEXT: li r26, 15
; CHECK-PWR9-BE-NEXT: vextublx r25, r26, v2
; CHECK-PWR9-BE-NEXT: vextublx r26, r26, v3
@@ -611,10 +558,10 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24
; CHECK-PWR9-BE-NEXT: lxv vs1, 0(r27)
; CHECK-PWR9-BE-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-PWR9-BE-NEXT: cmplw r25, r26
; CHECK-PWR9-BE-NEXT: sub r26, r25, r26
-; CHECK-PWR9-BE-NEXT: srawi r25, r26, 31
-; CHECK-PWR9-BE-NEXT: xor r26, r26, r25
-; CHECK-PWR9-BE-NEXT: sub r26, r26, r25
+; CHECK-PWR9-BE-NEXT: neg r25, r26
+; CHECK-PWR9-BE-NEXT: iselgt r26, r26, r25
; CHECK-PWR9-BE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
; CHECK-PWR9-BE-NEXT: mtvsrwz v2, r26
; CHECK-PWR9-BE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
@@ -650,179 +597,185 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
;
; CHECK-PWR8-LABEL: sub_absv_8_ext:
; CHECK-PWR8: # %bb.0: # %entry
+; CHECK-PWR8-NEXT: mfvsrd r8, v2
+; CHECK-PWR8-NEXT: mfvsrd r11, v3
+; CHECK-PWR8-NEXT: std r24, -64(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
; CHECK-PWR8-NEXT: xxswapd vs0, v2
; CHECK-PWR8-NEXT: xxswapd vs1, v3
+; CHECK-PWR8-NEXT: std r21, -88(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: std r23, -72(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: mffprd r3, f0
+; CHECK-PWR8-NEXT: mffprd r4, f1
; CHECK-PWR8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: std r22, -80(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
; CHECK-PWR8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT: clrldi r5, r8, 56
+; CHECK-PWR8-NEXT: clrldi r6, r11, 56
+; CHECK-PWR8-NEXT: rldicl r7, r8, 56, 56
; CHECK-PWR8-NEXT: std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: mffprd r11, f0
-; CHECK-PWR8-NEXT: mffprd r8, f1
-; CHECK-PWR8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
; CHECK-PWR8-NEXT: std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: clrldi r3, r11, 56
-; CHECK-PWR8-NEXT: clrldi r4, r8, 56
-; CHECK-PWR8-NEXT: rldicl r5, r11, 56, 56
-; CHECK-PWR8-NEXT: rldicl r6, r8, 56, 56
-; CHECK-PWR8-NEXT: rldicl r7, r11, 48, 56
-; CHECK-PWR8-NEXT: rldicl r9, r8, 48, 56
-; CHECK-PWR8-NEXT: rldicl r0, r11, 32, 56
-; CHECK-PWR8-NEXT: rldicl r30, r8, 32, 56
-; CHECK-PWR8-NEXT: rldicl r29, r11, 24, 56
-; CHECK-PWR8-NEXT: rldicl r28, r8, 24, 56
-; CHECK-PWR8-NEXT: rldicl r10, r11, 40, 56
-; CHECK-PWR8-NEXT: rldicl r12, r8, 40, 56
-; CHECK-PWR8-NEXT: rldicl r27, r11, 16, 56
-; CHECK-PWR8-NEXT: rldicl r11, r11, 8, 56
-; CHECK-PWR8-NEXT: std r24, -64(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR8-NEXT: clrlwi r4, r4, 24
; CHECK-PWR8-NEXT: clrlwi r5, r5, 24
; CHECK-PWR8-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR8-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR8-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR8-NEXT: sub r3, r3, r4
-; CHECK-PWR8-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: sub r4, r5, r6
+; CHECK-PWR8-NEXT: clrldi r9, r3, 56
+; CHECK-PWR8-NEXT: clrldi r10, r4, 56
+; CHECK-PWR8-NEXT: clrlwi r24, r7, 24
+; CHECK-PWR8-NEXT: rldicl r29, r4, 48, 56
+; CHECK-PWR8-NEXT: rldicl r12, r3, 56, 56
+; CHECK-PWR8-NEXT: rldicl r0, r4, 56, 56
+; CHECK-PWR8-NEXT: rldicl r30, r3, 48, 56
+; CHECK-PWR8-NEXT: rldicl r28, r3, 40, 56
+; CHECK-PWR8-NEXT: rldicl r27, r4, 40, 56
+; CHECK-PWR8-NEXT: rldicl r26, r3, 32, 56
+; CHECK-PWR8-NEXT: rldicl r25, r4, 32, 56
+; CHECK-PWR8-NEXT: cmplw r5, r6
+; CHECK-PWR8-NEXT: clrlwi r7, r9, 24
+; CHECK-PWR8-NEXT: clrlwi r9, r10, 24
+; CHECK-PWR8-NEXT: sub r10, r5, r6
+; CHECK-PWR8-NEXT: clrlwi r21, r29, 24
+; CHECK-PWR8-NEXT: neg r6, r10
; CHECK-PWR8-NEXT: sub r5, r7, r9
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT: sub r7, r0, r30
-; CHECK-PWR8-NEXT: sub r9, r29, r28
-; CHECK-PWR8-NEXT: clrlwi r10, r10, 24
; CHECK-PWR8-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT: sub r6, r10, r12
-; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR8-NEXT: srawi r0, r5, 31
-; CHECK-PWR8-NEXT: srawi r29, r7, 31
-; CHECK-PWR8-NEXT: srawi r12, r4, 31
-; CHECK-PWR8-NEXT: srawi r28, r9, 31
-; CHECK-PWR8-NEXT: srawi r30, r6, 31
-; CHECK-PWR8-NEXT: srawi r10, r3, 31
-; CHECK-PWR8-NEXT: xor r5, r5, r0
-; CHECK-PWR8-NEXT: xor r26, r7, r29
-; CHECK-PWR8-NEXT: sub r7, r5, r0
-; CHECK-PWR8-NEXT: rldicl r5, r8, 16, 56
-; CHECK-PWR8-NEXT: rldicl r8, r8, 8, 56
-; CHECK-PWR8-NEXT: xor r4, r4, r12
-; CHECK-PWR8-NEXT: xor r25, r9, r28
-; CHECK-PWR8-NEXT: sub r9, r4, r12
-; CHECK-PWR8-NEXT: sub r4, r26, r29
-; CHECK-PWR8-NEXT: mtvsrd v1, r9
-; CHECK-PWR8-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR8-NEXT: sub r5, r27, r5
-; CHECK-PWR8-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR8-NEXT: sub r8, r11, r8
-; CHECK-PWR8-NEXT: xor r6, r6, r30
-; CHECK-PWR8-NEXT: sub r6, r6, r30
-; CHECK-PWR8-NEXT: xor r3, r3, r10
-; CHECK-PWR8-NEXT: sub r10, r3, r10
-; CHECK-PWR8-NEXT: sub r3, r25, r28
-; CHECK-PWR8-NEXT: mtvsrd v6, r6
-; CHECK-PWR8-NEXT: mtvsrd v7, r3
-; CHECK-PWR8-NEXT: srawi r12, r5, 31
-; CHECK-PWR8-NEXT: srawi r11, r8, 31
-; CHECK-PWR8-NEXT: xor r5, r5, r12
-; CHECK-PWR8-NEXT: xor r8, r8, r11
-; CHECK-PWR8-NEXT: sub r5, r5, r12
-; CHECK-PWR8-NEXT: sub r8, r8, r11
-; CHECK-PWR8-NEXT: mfvsrd r11, v2
-; CHECK-PWR8-NEXT: mfvsrd r12, v3
-; CHECK-PWR8-NEXT: mtvsrd v8, r8
-; CHECK-PWR8-NEXT: clrldi r0, r11, 56
-; CHECK-PWR8-NEXT: clrldi r30, r12, 56
-; CHECK-PWR8-NEXT: rldicl r29, r12, 56, 56
-; CHECK-PWR8-NEXT: rldicl r28, r12, 48, 56
-; CHECK-PWR8-NEXT: rldicl r27, r12, 40, 56
-; CHECK-PWR8-NEXT: rldicl r26, r12, 32, 56
-; CHECK-PWR8-NEXT: rldicl r25, r12, 24, 56
-; CHECK-PWR8-NEXT: rldicl r24, r12, 16, 56
-; CHECK-PWR8-NEXT: rldicl r12, r12, 8, 56
-; CHECK-PWR8-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
+; CHECK-PWR8-NEXT: clrlwi r23, r0, 24
+; CHECK-PWR8-NEXT: clrlwi r22, r30, 24
; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT: clrlwi r24, r24, 24
-; CHECK-PWR8-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT: sub r0, r0, r30
-; CHECK-PWR8-NEXT: srawi r30, r0, 31
-; CHECK-PWR8-NEXT: xor r0, r0, r30
-; CHECK-PWR8-NEXT: sub r0, r0, r30
-; CHECK-PWR8-NEXT: rldicl r30, r11, 56, 56
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: mtvsrd v2, r0
-; CHECK-PWR8-NEXT: sub r30, r30, r29
-; CHECK-PWR8-NEXT: srawi r29, r30, 31
-; CHECK-PWR8-NEXT: xor r30, r30, r29
-; CHECK-PWR8-NEXT: sub r30, r30, r29
-; CHECK-PWR8-NEXT: rldicl r29, r11, 48, 56
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT: mtvsrd v3, r30
-; CHECK-PWR8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r29, r29, r28
-; CHECK-PWR8-NEXT: srawi r28, r29, 31
-; CHECK-PWR8-NEXT: xor r29, r29, r28
-; CHECK-PWR8-NEXT: sub r29, r29, r28
-; CHECK-PWR8-NEXT: rldicl r28, r11, 40, 56
+; CHECK-PWR8-NEXT: clrlwi r0, r26, 24
+; CHECK-PWR8-NEXT: clrlwi r30, r25, 24
+; CHECK-PWR8-NEXT: iselgt r29, r10, r6
+; CHECK-PWR8-NEXT: cmplw r7, r9
+; CHECK-PWR8-NEXT: neg r7, r5
+; CHECK-PWR8-NEXT: sub r6, r12, r23
+; CHECK-PWR8-NEXT: sub r9, r28, r27
+; CHECK-PWR8-NEXT: iselgt r10, r5, r7
+; CHECK-PWR8-NEXT: cmplw r12, r23
+; CHECK-PWR8-NEXT: neg r7, r6
+; CHECK-PWR8-NEXT: sub r5, r22, r21
+; CHECK-PWR8-NEXT: mtvsrd v2, r29
+; CHECK-PWR8-NEXT: iselgt r12, r6, r7
+; CHECK-PWR8-NEXT: cmplw r22, r21
+; CHECK-PWR8-NEXT: neg r6, r5
+; CHECK-PWR8-NEXT: iselgt r7, r5, r6
+; CHECK-PWR8-NEXT: cmplw r28, r27
+; CHECK-PWR8-NEXT: neg r28, r9
+; CHECK-PWR8-NEXT: mtvsrd v1, r12
+; CHECK-PWR8-NEXT: sub r5, r0, r30
+; CHECK-PWR8-NEXT: neg r6, r5
+; CHECK-PWR8-NEXT: iselgt r9, r9, r28
+; CHECK-PWR8-NEXT: rldicl r28, r11, 56, 56
; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT: sub r28, r28, r27
-; CHECK-PWR8-NEXT: srawi r27, r28, 31
-; CHECK-PWR8-NEXT: xor r28, r28, r27
-; CHECK-PWR8-NEXT: sub r28, r28, r27
-; CHECK-PWR8-NEXT: rldicl r27, r11, 32, 56
-; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: mtvsrd v4, r28
-; CHECK-PWR8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r27, r27, r26
-; CHECK-PWR8-NEXT: srawi r26, r27, 31
-; CHECK-PWR8-NEXT: xor r27, r27, r26
-; CHECK-PWR8-NEXT: sub r27, r27, r26
-; CHECK-PWR8-NEXT: rldicl r26, r11, 24, 56
+; CHECK-PWR8-NEXT: cmplw r24, r28
+; CHECK-PWR8-NEXT: sub r27, r24, r28
+; CHECK-PWR8-NEXT: rldicl r28, r8, 48, 56
+; CHECK-PWR8-NEXT: mtvsrd v6, r9
+; CHECK-PWR8-NEXT: neg r26, r27
+; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
+; CHECK-PWR8-NEXT: iselgt r27, r27, r26
+; CHECK-PWR8-NEXT: rldicl r26, r11, 48, 56
; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT: sub r26, r26, r25
-; CHECK-PWR8-NEXT: srawi r25, r26, 31
-; CHECK-PWR8-NEXT: xor r26, r26, r25
-; CHECK-PWR8-NEXT: sub r26, r26, r25
-; CHECK-PWR8-NEXT: rldicl r25, r11, 16, 56
-; CHECK-PWR8-NEXT: rldicl r11, r11, 8, 56
+; CHECK-PWR8-NEXT: cmplw r28, r26
+; CHECK-PWR8-NEXT: sub r25, r28, r26
+; CHECK-PWR8-NEXT: rldicl r26, r8, 40, 56
+; CHECK-PWR8-NEXT: mtvsrd v3, r27
+; CHECK-PWR8-NEXT: neg r24, r25
+; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
+; CHECK-PWR8-NEXT: iselgt r28, r25, r24
+; CHECK-PWR8-NEXT: rldicl r25, r11, 40, 56
; CHECK-PWR8-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR8-NEXT: mtvsrd v5, r26
-; CHECK-PWR8-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r25, r25, r24
-; CHECK-PWR8-NEXT: sub r11, r11, r12
-; CHECK-PWR8-NEXT: srawi r24, r25, 31
-; CHECK-PWR8-NEXT: srawi r12, r11, 31
-; CHECK-PWR8-NEXT: xor r25, r25, r24
-; CHECK-PWR8-NEXT: xor r11, r11, r12
-; CHECK-PWR8-NEXT: sub r25, r25, r24
-; CHECK-PWR8-NEXT: sub r11, r11, r12
+; CHECK-PWR8-NEXT: cmplw r26, r25
+; CHECK-PWR8-NEXT: sub r24, r26, r25
+; CHECK-PWR8-NEXT: rldicl r26, r8, 32, 56
+; CHECK-PWR8-NEXT: rldicl r25, r11, 32, 56
+; CHECK-PWR8-NEXT: neg r23, r24
+; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
+; CHECK-PWR8-NEXT: clrlwi r22, r25, 24
+; CHECK-PWR8-NEXT: sub r21, r26, r22
+; CHECK-PWR8-NEXT: iselgt r25, r24, r23
+; CHECK-PWR8-NEXT: cmplw r26, r22
+; CHECK-PWR8-NEXT: rldicl r23, r11, 24, 56
+; CHECK-PWR8-NEXT: neg r24, r21
+; CHECK-PWR8-NEXT: clrlwi r23, r23, 24
+; CHECK-PWR8-NEXT: iselgt r26, r21, r24
+; CHECK-PWR8-NEXT: rldicl r24, r8, 24, 56
+; CHECK-PWR8-NEXT: mtvsrd v4, r25
+; CHECK-PWR8-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: ld r21, -88(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: clrlwi r24, r24, 24
+; CHECK-PWR8-NEXT: cmplw r24, r23
+; CHECK-PWR8-NEXT: sub r22, r24, r23
+; CHECK-PWR8-NEXT: rldicl r23, r8, 16, 56
+; CHECK-PWR8-NEXT: rldicl r8, r8, 8, 56
+; CHECK-PWR8-NEXT: neg r24, r22
+; CHECK-PWR8-NEXT: clrlwi r23, r23, 24
+; CHECK-PWR8-NEXT: clrlwi r29, r8, 24
+; CHECK-PWR8-NEXT: rldicl r8, r11, 8, 56
+; CHECK-PWR8-NEXT: iselgt r24, r22, r24
+; CHECK-PWR8-NEXT: rldicl r22, r11, 16, 56
+; CHECK-PWR8-NEXT: clrlwi r11, r8, 24
+; CHECK-PWR8-NEXT: rldicl r8, r3, 24, 56
+; CHECK-PWR8-NEXT: clrlwi r22, r22, 24
+; CHECK-PWR8-NEXT: sub r27, r29, r11
+; CHECK-PWR8-NEXT: clrlwi r8, r8, 24
+; CHECK-PWR8-NEXT: cmplw r23, r22
+; CHECK-PWR8-NEXT: sub r23, r23, r22
+; CHECK-PWR8-NEXT: mtvsrd v5, r24
; CHECK-PWR8-NEXT: ld r24, -64(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: mtvsrd v0, r11
+; CHECK-PWR8-NEXT: neg r22, r23
+; CHECK-PWR8-NEXT: iselgt r23, r23, r22
+; CHECK-PWR8-NEXT: cmplw r29, r11
+; CHECK-PWR8-NEXT: rldicl r11, r4, 24, 56
+; CHECK-PWR8-NEXT: ld r22, -80(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: clrlwi r11, r11, 24
+; CHECK-PWR8-NEXT: sub r29, r8, r11
; CHECK-PWR8-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR8-NEXT: mtvsrd v3, r29
-; CHECK-PWR8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: mtvsrd v3, r28
+; CHECK-PWR8-NEXT: neg r28, r29
; CHECK-PWR8-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR8-NEXT: mtvsrd v4, r27
-; CHECK-PWR8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: mtvsrd v4, r26
+; CHECK-PWR8-NEXT: neg r26, r27
+; CHECK-PWR8-NEXT: iselgt r27, r27, r26
+; CHECK-PWR8-NEXT: cmplw r0, r30
+; CHECK-PWR8-NEXT: rldicl r0, r3, 16, 56
+; CHECK-PWR8-NEXT: rldicl r30, r4, 16, 56
+; CHECK-PWR8-NEXT: rldicl r3, r3, 8, 56
+; CHECK-PWR8-NEXT: rldicl r4, r4, 8, 56
; CHECK-PWR8-NEXT: vmrglh v2, v3, v2
+; CHECK-PWR8-NEXT: clrlwi r0, r0, 24
+; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
+; CHECK-PWR8-NEXT: clrlwi r3, r3, 24
+; CHECK-PWR8-NEXT: clrlwi r4, r4, 24
+; CHECK-PWR8-NEXT: sub r26, r0, r30
+; CHECK-PWR8-NEXT: mtvsrd v0, r27
+; CHECK-PWR8-NEXT: iselgt r5, r5, r6
+; CHECK-PWR8-NEXT: cmplw r8, r11
+; CHECK-PWR8-NEXT: neg r27, r26
; CHECK-PWR8-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR8-NEXT: mtvsrd v5, r25
-; CHECK-PWR8-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: mtvsrd v5, r23
+; CHECK-PWR8-NEXT: ld r23, -72(r1) # 8-byte Folded Reload
; CHECK-PWR8-NEXT: vmrghb v5, v0, v5
; CHECK-PWR8-NEXT: mtvsrd v0, r10
+; CHECK-PWR8-NEXT: sub r10, r3, r4
; CHECK-PWR8-NEXT: vmrglh v3, v5, v4
; CHECK-PWR8-NEXT: xxmrglw vs0, v3, v2
; CHECK-PWR8-NEXT: vmrghb v0, v1, v0
; CHECK-PWR8-NEXT: mtvsrd v1, r7
+; CHECK-PWR8-NEXT: neg r7, r10
; CHECK-PWR8-NEXT: vmrghb v1, v6, v1
-; CHECK-PWR8-NEXT: mtvsrd v6, r4
+; CHECK-PWR8-NEXT: mtvsrd v6, r5
+; CHECK-PWR8-NEXT: iselgt r5, r29, r28
+; CHECK-PWR8-NEXT: cmplw r0, r30
+; CHECK-PWR8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: mtvsrd v7, r5
+; CHECK-PWR8-NEXT: iselgt r5, r26, r27
+; CHECK-PWR8-NEXT: cmplw r3, r4
; CHECK-PWR8-NEXT: vmrglh v4, v1, v0
+; CHECK-PWR8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT: iselgt r3, r10, r7
+; CHECK-PWR8-NEXT: mtvsrd v8, r3
; CHECK-PWR8-NEXT: vmrghb v6, v7, v6
; CHECK-PWR8-NEXT: mtvsrd v7, r5
; CHECK-PWR8-NEXT: vmrghb v7, v8, v7
@@ -833,8 +786,10 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
;
; CHECK-PWR7-LABEL: sub_absv_8_ext:
; CHECK-PWR7: # %bb.0: # %entry
-; CHECK-PWR7-NEXT: stdu r1, -400(r1)
-; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 400
+; CHECK-PWR7-NEXT: stdu r1, -416(r1)
+; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 416
+; CHECK-PWR7-NEXT: .cfi_offset r22, -80
+; CHECK-PWR7-NEXT: .cfi_offset r23, -72
; CHECK-PWR7-NEXT: .cfi_offset r24, -64
; CHECK-PWR7-NEXT: .cfi_offset r25, -56
; CHECK-PWR7-NEXT: .cfi_offset r26, -48
@@ -843,136 +798,140 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR7-NEXT: .cfi_offset r29, -24
; CHECK-PWR7-NEXT: .cfi_offset r30, -16
; CHECK-PWR7-NEXT: addi r3, r1, 304
-; CHECK-PWR7-NEXT: std r24, 336(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r25, 344(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r26, 352(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r27, 360(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r28, 368(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r29, 376(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r30, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: addi r4, r1, 320
+; CHECK-PWR7-NEXT: std r22, 336(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r23, 344(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r24, 352(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r25, 360(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r26, 368(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r27, 376(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r28, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r29, 392(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT: std r30, 400(r1) # 8-byte Folded Spill
; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 320
-; CHECK-PWR7-NEXT: lbz r4, 304(r1)
-; CHECK-PWR7-NEXT: stxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: lbz r5, 305(r1)
-; CHECK-PWR7-NEXT: lbz r6, 321(r1)
-; CHECK-PWR7-NEXT: lbz r7, 306(r1)
-; CHECK-PWR7-NEXT: lbz r8, 322(r1)
-; CHECK-PWR7-NEXT: lbz r9, 307(r1)
-; CHECK-PWR7-NEXT: lbz r10, 323(r1)
-; CHECK-PWR7-NEXT: lbz r0, 309(r1)
-; CHECK-PWR7-NEXT: lbz r30, 325(r1)
-; CHECK-PWR7-NEXT: lbz r29, 310(r1)
-; CHECK-PWR7-NEXT: lbz r28, 326(r1)
-; CHECK-PWR7-NEXT: lbz r11, 308(r1)
-; CHECK-PWR7-NEXT: lbz r12, 324(r1)
-; CHECK-PWR7-NEXT: lbz r27, 311(r1)
-; CHECK-PWR7-NEXT: lbz r26, 327(r1)
-; CHECK-PWR7-NEXT: lbz r25, 312(r1)
-; CHECK-PWR7-NEXT: sub r5, r5, r6
-; CHECK-PWR7-NEXT: sub r6, r7, r8
-; CHECK-PWR7-NEXT: sub r7, r9, r10
-; CHECK-PWR7-NEXT: sub r9, r0, r30
-; CHECK-PWR7-NEXT: sub r10, r29, r28
-; CHECK-PWR7-NEXT: sub r8, r11, r12
-; CHECK-PWR7-NEXT: srawi r0, r5, 31
-; CHECK-PWR7-NEXT: srawi r30, r6, 31
-; CHECK-PWR7-NEXT: srawi r29, r7, 31
-; CHECK-PWR7-NEXT: srawi r28, r8, 31
-; CHECK-PWR7-NEXT: sub r11, r27, r26
-; CHECK-PWR7-NEXT: srawi r27, r9, 31
-; CHECK-PWR7-NEXT: lbz r24, 328(r1)
-; CHECK-PWR7-NEXT: xor r5, r5, r0
-; CHECK-PWR7-NEXT: xor r6, r6, r30
-; CHECK-PWR7-NEXT: xor r7, r7, r29
-; CHECK-PWR7-NEXT: xor r8, r8, r28
-; CHECK-PWR7-NEXT: xor r9, r9, r27
-; CHECK-PWR7-NEXT: srawi r26, r10, 31
-; CHECK-PWR7-NEXT: sub r5, r5, r0
-; CHECK-PWR7-NEXT: sub r6, r6, r30
-; CHECK-PWR7-NEXT: lbz r0, 313(r1)
-; CHECK-PWR7-NEXT: lbz r30, 329(r1)
-; CHECK-PWR7-NEXT: sub r7, r7, r29
-; CHECK-PWR7-NEXT: lbz r29, 330(r1)
-; CHECK-PWR7-NEXT: sub r8, r8, r28
-; CHECK-PWR7-NEXT: lbz r28, 331(r1)
-; CHECK-PWR7-NEXT: sub r9, r9, r27
-; CHECK-PWR7-NEXT: lbz r27, 332(r1)
-; CHECK-PWR7-NEXT: xor r10, r10, r26
-; CHECK-PWR7-NEXT: sub r10, r10, r26
-; CHECK-PWR7-NEXT: lbz r26, 333(r1)
-; CHECK-PWR7-NEXT: sub r12, r25, r24
-; CHECK-PWR7-NEXT: srawi r25, r11, 31
-; CHECK-PWR7-NEXT: lbz r3, 320(r1)
-; CHECK-PWR7-NEXT: sub r0, r0, r30
-; CHECK-PWR7-NEXT: xor r11, r11, r25
-; CHECK-PWR7-NEXT: sub r11, r11, r25
-; CHECK-PWR7-NEXT: lbz r25, 334(r1)
-; CHECK-PWR7-NEXT: sub r4, r4, r3
-; CHECK-PWR7-NEXT: srawi r30, r0, 31
-; CHECK-PWR7-NEXT: srawi r24, r12, 31
-; CHECK-PWR7-NEXT: xor r12, r12, r24
-; CHECK-PWR7-NEXT: sub r12, r12, r24
-; CHECK-PWR7-NEXT: lbz r24, 335(r1)
-; CHECK-PWR7-NEXT: srawi r3, r4, 31
-; CHECK-PWR7-NEXT: xor r4, r4, r3
-; CHECK-PWR7-NEXT: xor r0, r0, r30
-; CHECK-PWR7-NEXT: sub r3, r4, r3
+; CHECK-PWR7-NEXT: lbz r3, 304(r1)
+; CHECK-PWR7-NEXT: stxvw4x v3, 0, r4
+; CHECK-PWR7-NEXT: lbz r6, 305(r1)
+; CHECK-PWR7-NEXT: lbz r7, 321(r1)
+; CHECK-PWR7-NEXT: lbz r8, 306(r1)
+; CHECK-PWR7-NEXT: lbz r9, 322(r1)
+; CHECK-PWR7-NEXT: sub r24, r6, r7
+; CHECK-PWR7-NEXT: lbz r10, 307(r1)
+; CHECK-PWR7-NEXT: lbz r11, 323(r1)
+; CHECK-PWR7-NEXT: lbz r12, 308(r1)
+; CHECK-PWR7-NEXT: cmplw r6, r7
+; CHECK-PWR7-NEXT: neg r6, r24
+; CHECK-PWR7-NEXT: sub r7, r8, r9
+; CHECK-PWR7-NEXT: lbz r0, 324(r1)
+; CHECK-PWR7-NEXT: lbz r30, 309(r1)
+; CHECK-PWR7-NEXT: lbz r29, 325(r1)
+; CHECK-PWR7-NEXT: lbz r28, 310(r1)
+; CHECK-PWR7-NEXT: lbz r27, 326(r1)
+; CHECK-PWR7-NEXT: lbz r26, 311(r1)
+; CHECK-PWR7-NEXT: lbz r25, 327(r1)
+; CHECK-PWR7-NEXT: iselgt r6, r24, r6
+; CHECK-PWR7-NEXT: cmplw r8, r9
+; CHECK-PWR7-NEXT: neg r8, r7
+; CHECK-PWR7-NEXT: sub r9, r10, r11
+; CHECK-PWR7-NEXT: lbz r4, 320(r1)
+; CHECK-PWR7-NEXT: sub r5, r3, r4
+; CHECK-PWR7-NEXT: stb r6, 64(r1)
+; CHECK-PWR7-NEXT: iselgt r7, r7, r8
+; CHECK-PWR7-NEXT: cmplw r10, r11
+; CHECK-PWR7-NEXT: neg r8, r9
+; CHECK-PWR7-NEXT: sub r10, r12, r0
+; CHECK-PWR7-NEXT: sub r11, r30, r29
+; CHECK-PWR7-NEXT: stb r7, 80(r1)
+; CHECK-PWR7-NEXT: iselgt r8, r9, r8
+; CHECK-PWR7-NEXT: cmplw r12, r0
+; CHECK-PWR7-NEXT: neg r9, r10
+; CHECK-PWR7-NEXT: sub r12, r28, r27
+; CHECK-PWR7-NEXT: stb r8, 96(r1)
+; CHECK-PWR7-NEXT: iselgt r9, r10, r9
+; CHECK-PWR7-NEXT: cmplw r30, r29
+; CHECK-PWR7-NEXT: neg r10, r11
+; CHECK-PWR7-NEXT: sub r30, r26, r25
+; CHECK-PWR7-NEXT: neg r29, r12
+; CHECK-PWR7-NEXT: stb r9, 112(r1)
+; CHECK-PWR7-NEXT: iselgt r11, r11, r10
+; CHECK-PWR7-NEXT: cmplw r28, r27
+; CHECK-PWR7-NEXT: neg r10, r5
+; CHECK-PWR7-NEXT: iselgt r12, r12, r29
+; CHECK-PWR7-NEXT: neg r29, r30
+; CHECK-PWR7-NEXT: cmplw r26, r25
+; CHECK-PWR7-NEXT: iselgt r0, r30, r29
+; CHECK-PWR7-NEXT: lbz r30, 312(r1)
+; CHECK-PWR7-NEXT: lbz r29, 328(r1)
+; CHECK-PWR7-NEXT: stb r12, 144(r1)
+; CHECK-PWR7-NEXT: sub r28, r30, r29
+; CHECK-PWR7-NEXT: cmplw r30, r29
+; CHECK-PWR7-NEXT: lbz r29, 313(r1)
+; CHECK-PWR7-NEXT: stb r0, 160(r1)
+; CHECK-PWR7-NEXT: stb r11, 128(r1)
+; CHECK-PWR7-NEXT: neg r27, r28
+; CHECK-PWR7-NEXT: iselgt r30, r28, r27
+; CHECK-PWR7-NEXT: lbz r28, 329(r1)
+; CHECK-PWR7-NEXT: stb r30, 176(r1)
+; CHECK-PWR7-NEXT: ld r30, 400(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r27, r29, r28
+; CHECK-PWR7-NEXT: cmplw r29, r28
+; CHECK-PWR7-NEXT: lbz r28, 314(r1)
+; CHECK-PWR7-NEXT: neg r26, r27
+; CHECK-PWR7-NEXT: iselgt r29, r27, r26
+; CHECK-PWR7-NEXT: lbz r27, 330(r1)
+; CHECK-PWR7-NEXT: stb r29, 192(r1)
+; CHECK-PWR7-NEXT: ld r29, 392(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r26, r28, r27
+; CHECK-PWR7-NEXT: cmplw r28, r27
+; CHECK-PWR7-NEXT: lbz r27, 315(r1)
+; CHECK-PWR7-NEXT: neg r25, r26
+; CHECK-PWR7-NEXT: iselgt r28, r26, r25
+; CHECK-PWR7-NEXT: lbz r26, 331(r1)
+; CHECK-PWR7-NEXT: stb r28, 208(r1)
+; CHECK-PWR7-NEXT: ld r28, 384(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r25, r27, r26
+; CHECK-PWR7-NEXT: cmplw r27, r26
+; CHECK-PWR7-NEXT: lbz r26, 316(r1)
+; CHECK-PWR7-NEXT: neg r24, r25
+; CHECK-PWR7-NEXT: iselgt r27, r25, r24
+; CHECK-PWR7-NEXT: lbz r25, 332(r1)
+; CHECK-PWR7-NEXT: stb r27, 224(r1)
+; CHECK-PWR7-NEXT: ld r27, 376(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r24, r26, r25
+; CHECK-PWR7-NEXT: cmplw r26, r25
+; CHECK-PWR7-NEXT: lbz r25, 317(r1)
+; CHECK-PWR7-NEXT: neg r23, r24
+; CHECK-PWR7-NEXT: iselgt r26, r24, r23
+; CHECK-PWR7-NEXT: lbz r24, 333(r1)
+; CHECK-PWR7-NEXT: stb r26, 240(r1)
+; CHECK-PWR7-NEXT: ld r26, 368(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r23, r25, r24
+; CHECK-PWR7-NEXT: cmplw r25, r24
+; CHECK-PWR7-NEXT: lbz r24, 318(r1)
+; CHECK-PWR7-NEXT: neg r22, r23
+; CHECK-PWR7-NEXT: iselgt r25, r23, r22
+; CHECK-PWR7-NEXT: lbz r23, 334(r1)
+; CHECK-PWR7-NEXT: stb r25, 256(r1)
+; CHECK-PWR7-NEXT: ld r25, 360(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: sub r22, r24, r23
+; CHECK-PWR7-NEXT: cmplw r24, r23
+; CHECK-PWR7-NEXT: lbz r23, 319(r1)
+; CHECK-PWR7-NEXT: neg r24, r22
+; CHECK-PWR7-NEXT: iselgt r24, r22, r24
+; CHECK-PWR7-NEXT: lbz r22, 335(r1)
+; CHECK-PWR7-NEXT: stb r24, 272(r1)
+; CHECK-PWR7-NEXT: ld r24, 352(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: cmplw r23, r22
+; CHECK-PWR7-NEXT: sub r23, r23, r22
+; CHECK-PWR7-NEXT: neg r22, r23
+; CHECK-PWR7-NEXT: iselgt r23, r23, r22
+; CHECK-PWR7-NEXT: cmplw r3, r4
+; CHECK-PWR7-NEXT: ld r22, 336(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: stb r23, 288(r1)
+; CHECK-PWR7-NEXT: ld r23, 344(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT: iselgt r3, r5, r10
; CHECK-PWR7-NEXT: stb r3, 48(r1)
; CHECK-PWR7-NEXT: addi r3, r1, 288
-; CHECK-PWR7-NEXT: stb r12, 176(r1)
-; CHECK-PWR7-NEXT: sub r0, r0, r30
-; CHECK-PWR7-NEXT: lbz r30, 314(r1)
-; CHECK-PWR7-NEXT: stb r11, 160(r1)
-; CHECK-PWR7-NEXT: sub r30, r30, r29
-; CHECK-PWR7-NEXT: stb r0, 192(r1)
-; CHECK-PWR7-NEXT: stb r10, 144(r1)
-; CHECK-PWR7-NEXT: stb r9, 128(r1)
-; CHECK-PWR7-NEXT: stb r8, 112(r1)
-; CHECK-PWR7-NEXT: stb r7, 96(r1)
-; CHECK-PWR7-NEXT: stb r6, 80(r1)
-; CHECK-PWR7-NEXT: srawi r29, r30, 31
-; CHECK-PWR7-NEXT: stb r5, 64(r1)
-; CHECK-PWR7-NEXT: xor r30, r30, r29
-; CHECK-PWR7-NEXT: sub r30, r30, r29
-; CHECK-PWR7-NEXT: lbz r29, 315(r1)
-; CHECK-PWR7-NEXT: sub r29, r29, r28
-; CHECK-PWR7-NEXT: stb r30, 208(r1)
-; CHECK-PWR7-NEXT: ld r30, 384(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r28, r29, 31
-; CHECK-PWR7-NEXT: xor r29, r29, r28
-; CHECK-PWR7-NEXT: sub r29, r29, r28
-; CHECK-PWR7-NEXT: lbz r28, 316(r1)
-; CHECK-PWR7-NEXT: sub r28, r28, r27
-; CHECK-PWR7-NEXT: stb r29, 224(r1)
-; CHECK-PWR7-NEXT: ld r29, 376(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r27, r28, 31
-; CHECK-PWR7-NEXT: xor r28, r28, r27
-; CHECK-PWR7-NEXT: sub r28, r28, r27
-; CHECK-PWR7-NEXT: lbz r27, 317(r1)
-; CHECK-PWR7-NEXT: sub r27, r27, r26
-; CHECK-PWR7-NEXT: stb r28, 240(r1)
-; CHECK-PWR7-NEXT: ld r28, 368(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r26, r27, 31
-; CHECK-PWR7-NEXT: xor r27, r27, r26
-; CHECK-PWR7-NEXT: sub r27, r27, r26
-; CHECK-PWR7-NEXT: lbz r26, 318(r1)
-; CHECK-PWR7-NEXT: sub r26, r26, r25
-; CHECK-PWR7-NEXT: stb r27, 256(r1)
-; CHECK-PWR7-NEXT: ld r27, 360(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r25, r26, 31
-; CHECK-PWR7-NEXT: xor r26, r26, r25
-; CHECK-PWR7-NEXT: sub r26, r26, r25
-; CHECK-PWR7-NEXT: lbz r25, 319(r1)
-; CHECK-PWR7-NEXT: sub r25, r25, r24
-; CHECK-PWR7-NEXT: stb r26, 272(r1)
-; CHECK-PWR7-NEXT: ld r26, 352(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r24, r25, 31
-; CHECK-PWR7-NEXT: xor r25, r25, r24
-; CHECK-PWR7-NEXT: sub r25, r25, r24
-; CHECK-PWR7-NEXT: ld r24, 336(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: stb r25, 288(r1)
-; CHECK-PWR7-NEXT: ld r25, 344(r1) # 8-byte Folded Reload
; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3
; CHECK-PWR7-NEXT: addi r3, r1, 272
; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
@@ -1019,7 +978,7 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
; CHECK-PWR7-NEXT: vmrghh v3, v4, v3
; CHECK-PWR7-NEXT: xxmrghw vs1, v3, v2
; CHECK-PWR7-NEXT: xxmrghd v2, vs1, vs0
-; CHECK-PWR7-NEXT: addi r1, r1, 400
+; CHECK-PWR7-NEXT: addi r1, r1, 416
; CHECK-PWR7-NEXT: blr
entry:
%vecext = extractelement <16 x i8> %a, i32 0
@@ -1208,13 +1167,20 @@ entry:
}
define <8 x i16> @sub_absv_vec_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
-; CHECK-LABEL: sub_absv_vec_16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsubuhm v2, v2, v3
-; CHECK-NEXT: xxlxor v3, v3, v3
-; CHECK-NEXT: vsubuhm v3, v3, v2
-; CHECK-NEXT: vmaxsh v2, v2, v3
-; CHECK-NEXT: blr
+; CHECK-PWR9-LABEL: sub_absv_vec_16:
+; CHECK-PWR9: # %bb.0: # %entry
+; CHECK-PWR9-NEXT: vminsh v4, v2, v3
+; CHECK-PWR9-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR9-NEXT: vsubuhm v2, v2, v4
+; CHECK-PWR9-NEXT: blr
+;
+; CHECK-PWR78-LABEL: sub_absv_vec_16:
+; CHECK-PWR78: # %bb.0: # %entry
+; CHECK-PWR78-NEXT: vsubuhm v2, v2, v3
+; CHECK-PWR78-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR78-NEXT: vsubuhm v3, v3, v2
+; CHECK-PWR78-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR78-NEXT: blr
entry:
%sub = sub nsw <8 x i16> %a, %b
%sub.i = sub <8 x i16> zeroinitializer, %sub
@@ -1223,13 +1189,20 @@ entry:
}
define <16 x i8> @sub_absv_vec_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
-; CHECK-LABEL: sub_absv_vec_8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsububm v2, v2, v3
-; CHECK-NEXT: xxlxor v3, v3, v3
-; CHECK-NEXT: vsububm v3, v3, v2
-; CHECK-NEXT: vmaxsb v2, v2, v3
-; CHECK-NEXT: blr
+; CHECK-PWR9-LABEL: sub_absv_vec_8:
+; CHECK-PWR9: # %bb.0: # %entry
+; CHECK-PWR9-NEXT: vminsb v4, v2, v3
+; CHECK-PWR9-NEXT: vmaxsb v2, v2, v3
+; CHECK-PWR9-NEXT: vsububm v2, v2, v4
+; CHECK-PWR9-NEXT: blr
+;
+; CHECK-PWR78-LABEL: sub_absv_vec_8:
+; CHECK-PWR78: # %bb.0: # %entry
+; CHECK-PWR78-NEXT: vsububm v2, v2, v3
+; CHECK-PWR78-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR78-NEXT: vsububm v3, v3, v2
+; CHECK-PWR78-NEXT: vmaxsb v2, v2, v3
+; CHECK-PWR78-NEXT: blr
entry:
%sub = sub nsw <16 x i8> %a, %b
%sub.i = sub <16 x i8> zeroinitializer, %sub
@@ -1240,18 +1213,16 @@ entry:
define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
; CHECK-PWR9-LE-LABEL: zext_sub_absd32:
; CHECK-PWR9-LE: # %bb.0:
-; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR9-LE-NEXT: vmrglh v2, v4, v2
-; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3
-; CHECK-PWR9-LE-NEXT: vabsduw v2, v2, v3
+; CHECK-PWR9-LE-NEXT: vabsduh v2, v2, v3
+; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2
; CHECK-PWR9-LE-NEXT: blr
;
; CHECK-PWR9-BE-LABEL: zext_sub_absd32:
; CHECK-PWR9-BE: # %bb.0:
-; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR9-BE-NEXT: vmrghh v2, v4, v2
-; CHECK-PWR9-BE-NEXT: vmrghh v3, v4, v3
-; CHECK-PWR9-BE-NEXT: vabsduw v2, v2, v3
+; CHECK-PWR9-BE-NEXT: vabsduh v2, v2, v3
+; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2
; CHECK-PWR9-BE-NEXT: blr
;
; CHECK-PWR8-LABEL: zext_sub_absd32:
@@ -1287,18 +1258,16 @@ define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
define <8 x i16> @zext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr {
; CHECK-PWR9-LE-LABEL: zext_sub_absd16:
; CHECK-PWR9-LE: # %bb.0:
-; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR9-LE-NEXT: vmrglb v2, v4, v2
-; CHECK-PWR9-LE-NEXT: vmrglb v3, v4, v3
-; CHECK-PWR9-LE-NEXT: vabsduh v2, v2, v3
+; CHECK-PWR9-LE-NEXT: vabsdub v2, v2, v3
+; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT: vmrglb v2, v3, v2
; CHECK-PWR9-LE-NEXT: blr
;
; CHECK-PWR9-BE-LABEL: zext_sub_absd16:
; CHECK-PWR9-BE: # %bb.0:
-; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4
-; CHECK-PWR9-BE-NEXT: vmrghb v2, v4, v2
-; CHECK-PWR9-BE-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR9-BE-NEXT: vabsduh v2, v2, v3
+; CHECK-PWR9-BE-NEXT: vabsdub v2, v2, v3
+; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT: vmrghb v2, v3, v2
; CHECK-PWR9-BE-NEXT: blr
;
; CHECK-PWR8-LABEL: zext_sub_absd16:
@@ -1335,8 +1304,8 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
; CHECK-PWR9-LABEL: zext_sub_absd8:
; CHECK-PWR9: # %bb.0:
; CHECK-PWR9-NEXT: xxspltib vs0, 15
-; CHECK-PWR9-NEXT: xxland v2, v2, vs0
; CHECK-PWR9-NEXT: xxland v3, v3, vs0
+; CHECK-PWR9-NEXT: xxland v2, v2, vs0
; CHECK-PWR9-NEXT: vabsdub v2, v2, v3
; CHECK-PWR9-NEXT: blr
;
@@ -1361,24 +1330,20 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
; CHECK-PWR9-LE-LABEL: sext_sub_absd32:
; CHECK-PWR9-LE: # %bb.0:
-; CHECK-PWR9-LE-NEXT: vmrglh v2, v2, v2
-; CHECK-PWR9-LE-NEXT: vmrglh v3, v3, v3
-; CHECK-PWR9-LE-NEXT: vextsh2w v2, v2
-; CHECK-PWR9-LE-NEXT: vextsh2w v3, v3
-; CHECK-PWR9-LE-NEXT: xvnegsp v3, v3
-; CHECK-PWR9-LE-NEXT: xvnegsp v2, v2
-; CHECK-PWR9-LE-NEXT: vabsduw v2, v2, v3
+; CHECK-PWR9-LE-NEXT: vminsh v4, v2, v3
+; CHECK-PWR9-LE-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT: vsubuhm v2, v2, v4
+; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2
; CHECK-PWR9-LE-NEXT: blr
;
; CHECK-PWR9-BE-LABEL: sext_sub_absd32:
; CHECK-PWR9-BE: # %bb.0:
-; CHECK-PWR9-BE-NEXT: vmrghh v2, v2, v2
-; CHECK-PWR9-BE-NEXT: vmrghh v3, v3, v3
-; CHECK-PWR9-BE-NEXT: vextsh2w v2, v2
-; CHECK-PWR9-BE-NEXT: vextsh2w v3, v3
-; CHECK-PWR9-BE-NEXT: xvnegsp v3, v3
-; CHECK-PWR9-BE-NEXT: xvnegsp v2, v2
-; CHECK-PWR9-BE-NEXT: vabsduw v2, v2, v3
+; CHECK-PWR9-BE-NEXT: vminsh v4, v2, v3
+; CHECK-PWR9-BE-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT: vsubuhm v2, v2, v4
+; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2
; CHECK-PWR9-BE-NEXT: blr
;
; CHECK-PWR8-LABEL: sext_sub_absd32:
@@ -1423,32 +1388,20 @@ define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
define <8 x i16> @sext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr {
; CHECK-PWR9-LE-LABEL: sext_sub_absd16:
; CHECK-PWR9-LE: # %bb.0:
-; CHECK-PWR9-LE-NEXT: vmrglb v2, v2, v2
-; CHECK-PWR9-LE-NEXT: vspltish v4, 8
-; CHECK-PWR9-LE-NEXT: vmrglb v3, v3, v3
-; CHECK-PWR9-LE-NEXT: vslh v2, v2, v4
-; CHECK-PWR9-LE-NEXT: vslh v3, v3, v4
-; CHECK-PWR9-LE-NEXT: vsrah v2, v2, v4
-; CHECK-PWR9-LE-NEXT: vsrah v3, v3, v4
-; CHECK-PWR9-LE-NEXT: vsubuhm v2, v2, v3
+; CHECK-PWR9-LE-NEXT: vminsb v4, v2, v3
+; CHECK-PWR9-LE-NEXT: vmaxsb v2, v2, v3
; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3
-; CHECK-PWR9-LE-NEXT: vsubuhm v3, v3, v2
-; CHECK-PWR9-LE-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR9-LE-NEXT: vsububm v2, v2, v4
+; CHECK-PWR9-LE-NEXT: vmrglb v2, v3, v2
; CHECK-PWR9-LE-NEXT: blr
;
; CHECK-PWR9-BE-LABEL: sext_sub_absd16:
; CHECK-PWR9-BE: # %bb.0:
-; CHECK-PWR9-BE-NEXT: vmrghb v2, v2, v2
-; CHECK-PWR9-BE-NEXT: vspltish v4, 8
-; CHECK-PWR9-BE-NEXT: vmrghb v3, v3, v3
-; CHECK-PWR9-BE-NEXT: vslh v2, v2, v4
-; CHECK-PWR9-BE-NEXT: vslh v3, v3, v4
-; CHECK-PWR9-BE-NEXT: vsrah v2, v2, v4
-; CHECK-PWR9-BE-NEXT: vsrah v3, v3, v4
-; CHECK-PWR9-BE-NEXT: vsubuhm v2, v2, v3
+; CHECK-PWR9-BE-NEXT: vminsb v4, v2, v3
+; CHECK-PWR9-BE-NEXT: vmaxsb v2, v2, v3
; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3
-; CHECK-PWR9-BE-NEXT: vsubuhm v3, v3, v2
-; CHECK-PWR9-BE-NEXT: vmaxsh v2, v2, v3
+; CHECK-PWR9-BE-NEXT: vsububm v2, v2, v4
+; CHECK-PWR9-BE-NEXT: vmrghb v2, v3, v2
; CHECK-PWR9-BE-NEXT: blr
;
; CHECK-PWR8-LABEL: sext_sub_absd16:
@@ -1492,14 +1445,15 @@ define <16 x i8> @sext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
; CHECK-PWR9-LABEL: sext_sub_absd8:
; CHECK-PWR9: # %bb.0:
; CHECK-PWR9-NEXT: xxspltib v4, 4
-; CHECK-PWR9-NEXT: vslb v2, v2, v4
+; CHECK-PWR9-NEXT: xxspltib vs0, 15
; CHECK-PWR9-NEXT: vslb v3, v3, v4
-; CHECK-PWR9-NEXT: vsrab v2, v2, v4
+; CHECK-PWR9-NEXT: vslb v2, v2, v4
; CHECK-PWR9-NEXT: vsrab v3, v3, v4
-; CHECK-PWR9-NEXT: vsububm v2, v2, v3
-; CHECK-PWR9-NEXT: xxlxor v3, v3, v3
-; CHECK-PWR9-NEXT: vsububm v3, v3, v2
+; CHECK-PWR9-NEXT: vsrab v2, v2, v4
+; CHECK-PWR9-NEXT: vminsb v4, v2, v3
; CHECK-PWR9-NEXT: vmaxsb v2, v2, v3
+; CHECK-PWR9-NEXT: vsububm v2, v2, v4
+; CHECK-PWR9-NEXT: xxland v2, v2, vs0
; CHECK-PWR9-NEXT: blr
;
; CHECK-PWR78-LABEL: sext_sub_absd8:
@@ -1532,10 +1486,9 @@ define <4 x i32> @absd_int32_ugt(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_ugt:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminuw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ugt <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1552,11 +1505,9 @@ define <4 x i32> @absd_int32_uge(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_uge:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2
-; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v4, vs0
+; CHECK-PWR78-NEXT: vminuw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp uge <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1573,10 +1524,9 @@ define <4 x i32> @absd_int32_ult(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_ult:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2
-; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminuw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ult <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1593,11 +1543,9 @@ define <4 x i32> @absd_int32_ule(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_ule:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3
-; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v4, v2, vs0
+; CHECK-PWR78-NEXT: vminuw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ule <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1614,10 +1562,9 @@ define <8 x i16> @absd_int16_ugt(<8 x i16>, <8 x i16>) {
;
; CHECK-PWR78-LABEL: absd_int16_ugt:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminuh v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ugt <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1634,11 +1581,9 @@ define <8 x i16> @absd_int16_uge(<8 x i16>, <8 x i16>) {
;
; CHECK-PWR78-LABEL: absd_int16_uge:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2
-; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT: xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminuh v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp uge <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1655,10 +1600,9 @@ define <8 x i16> @absd_int16_ult(<8 x i16>, <8 x i16>) {
;
; CHECK-PWR78-LABEL: absd_int16_ult:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2
-; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminuh v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ult <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1675,11 +1619,9 @@ define <8 x i16> @absd_int16_ule(<8 x i16>, <8 x i16>) {
;
; CHECK-PWR78-LABEL: absd_int16_ule:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT: xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminuh v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ule <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1696,10 +1638,9 @@ define <16 x i8> @absd_int8_ugt(<16 x i8>, <16 x i8>) {
;
; CHECK-PWR78-LABEL: absd_int8_ugt:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3
-; CHECK-PWR78-NEXT: vsububm v5, v2, v3
-; CHECK-PWR78-NEXT: vsububm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminub v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT: vsububm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ugt <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1716,11 +1657,9 @@ define <16 x i8> @absd_int8_uge(<16 x i8>, <16 x i8>) {
;
; CHECK-PWR78-LABEL: absd_int8_uge:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2
-; CHECK-PWR78-NEXT: vsububm v5, v2, v3
-; CHECK-PWR78-NEXT: vsububm v2, v3, v2
-; CHECK-PWR78-NEXT: xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminub v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT: vsububm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp uge <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1737,10 +1676,9 @@ define <16 x i8> @absd_int8_ult(<16 x i8>, <16 x i8>) {
;
; CHECK-PWR78-LABEL: absd_int8_ult:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2
-; CHECK-PWR78-NEXT: vsububm v5, v2, v3
-; CHECK-PWR78-NEXT: vsububm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminub v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT: vsububm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ult <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1757,11 +1695,9 @@ define <16 x i8> @absd_int8_ule(<16 x i8>, <16 x i8>) {
;
; CHECK-PWR78-LABEL: absd_int8_ule:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3
-; CHECK-PWR78-NEXT: vsububm v5, v2, v3
-; CHECK-PWR78-NEXT: vsububm v2, v3, v2
-; CHECK-PWR78-NEXT: xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminub v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT: vsububm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp ule <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1782,10 +1718,9 @@ define <4 x i32> @absd_int32_sgt(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_sgt:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtsw v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT: vminsw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp sgt <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1804,11 +1739,9 @@ define <4 x i32> @absd_int32_sge(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_sge:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtsw v4, v3, v2
-; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v2, v4, vs0
+; CHECK-PWR78-NEXT: vminsw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp sge <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1827,10 +1760,9 @@ define <4 x i32> @absd_int32_slt(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_slt:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtsw v4, v3, v2
-; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT: vminsw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp slt <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1849,11 +1781,9 @@ define <4 x i32> @absd_int32_sle(<4 x i32>, <4 x i32>) {
;
; CHECK-PWR78-LABEL: absd_int32_sle:
; CHECK-PWR78: # %bb.0:
-; CHECK-PWR78-NEXT: vcmpgtsw v4, v2, v3
-; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT: xxsel v2, v4, v2, vs0
+; CHECK-PWR78-NEXT: vminsw v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4
; CHECK-PWR78-NEXT: blr
%3 = icmp sle <4 x i32> %0, %1
%4 = sub <4 x i32> %0, %1
@@ -1865,10 +1795,9 @@ define <4 x i32> @absd_int32_sle(<4 x i32>, <4 x i32>) {
define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) {
; CHECK-LABEL: absd_int16_sgt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsh v4, v2, v3
-; CHECK-NEXT: vsubuhm v5, v2, v3
-; CHECK-NEXT: vsubuhm v2, v3, v2
-; CHECK-NEXT: xxsel v2, v2, v5, v4
+; CHECK-NEXT: vminsh v4, v2, v3
+; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sgt <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1880,11 +1809,9 @@ define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) {
define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) {
; CHECK-LABEL: absd_int16_sge:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsh v4, v3, v2
-; CHECK-NEXT: vsubuhm v5, v2, v3
-; CHECK-NEXT: vsubuhm v2, v3, v2
-; CHECK-NEXT: xxlnor v4, v4, v4
-; CHECK-NEXT: xxsel v2, v2, v5, v4
+; CHECK-NEXT: vminsh v4, v2, v3
+; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sge <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1896,10 +1823,9 @@ define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) {
define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) {
; CHECK-LABEL: absd_int16_slt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsh v4, v3, v2
-; CHECK-NEXT: vsubuhm v5, v2, v3
-; CHECK-NEXT: vsubuhm v2, v3, v2
-; CHECK-NEXT: xxsel v2, v5, v2, v4
+; CHECK-NEXT: vminsh v4, v2, v3
+; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp slt <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1911,11 +1837,9 @@ define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) {
define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) {
; CHECK-LABEL: absd_int16_sle:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsh v4, v2, v3
-; CHECK-NEXT: vsubuhm v5, v2, v3
-; CHECK-NEXT: vsubuhm v2, v3, v2
-; CHECK-NEXT: xxlnor v4, v4, v4
-; CHECK-NEXT: xxsel v2, v5, v2, v4
+; CHECK-NEXT: vminsh v4, v2, v3
+; CHECK-NEXT: vmaxsh v2, v2, v3
+; CHECK-NEXT: vsubuhm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sle <8 x i16> %0, %1
%4 = sub <8 x i16> %0, %1
@@ -1927,10 +1851,9 @@ define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) {
define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) {
; CHECK-LABEL: absd_int8_sgt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsb v4, v2, v3
-; CHECK-NEXT: vsububm v5, v2, v3
-; CHECK-NEXT: vsububm v2, v3, v2
-; CHECK-NEXT: xxsel v2, v2, v5, v4
+; CHECK-NEXT: vminsb v4, v2, v3
+; CHECK-NEXT: vmaxsb v2, v2, v3
+; CHECK-NEXT: vsububm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sgt <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1942,11 +1865,9 @@ define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) {
define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) {
; CHECK-LABEL: absd_int8_sge:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsb v4, v3, v2
-; CHECK-NEXT: vsububm v5, v2, v3
-; CHECK-NEXT: vsububm v2, v3, v2
-; CHECK-NEXT: xxlnor v4, v4, v4
-; CHECK-NEXT: xxsel v2, v2, v5, v4
+; CHECK-NEXT: vminsb v4, v2, v3
+; CHECK-NEXT: vmaxsb v2, v2, v3
+; CHECK-NEXT: vsububm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sge <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1958,10 +1879,9 @@ define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) {
define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) {
; CHECK-LABEL: absd_int8_slt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsb v4, v3, v2
-; CHECK-NEXT: vsububm v5, v2, v3
-; CHECK-NEXT: vsububm v2, v3, v2
-; CHECK-NEXT: xxsel v2, v5, v2, v4
+; CHECK-NEXT: vminsb v4, v2, v3
+; CHECK-NEXT: vmaxsb v2, v2, v3
+; CHECK-NEXT: vsububm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp slt <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -1973,11 +1893,9 @@ define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) {
define <16 x i8> @absd_int8_sle(<16 x i8>, <16 x i8>) {
; CHECK-LABEL: absd_int8_sle:
; CHECK: # %bb.0:
-; CHECK-NEXT: vcmpgtsb v4, v2, v3
-; CHECK-NEXT: vsububm v5, v2, v3
-; CHECK-NEXT: vsububm v2, v3, v2
-; CHECK-NEXT: xxlnor v4, v4, v4
-; CHECK-NEXT: xxsel v2, v5, v2, v4
+; CHECK-NEXT: vminsb v4, v2, v3
+; CHECK-NEXT: vmaxsb v2, v2, v3
+; CHECK-NEXT: vsububm v2, v2, v4
; CHECK-NEXT: blr
%3 = icmp sle <16 x i8> %0, %1
%4 = sub <16 x i8> %0, %1
@@ -2006,53 +1924,55 @@ define <4 x i32> @absd_int32_ugt_opp(<4 x i32>, <4 x i32>) {
define <2 x i64> @absd_int64_ugt(<2 x i64>, <2 x i64>) {
; CHECK-PWR9-LABEL: absd_int64_ugt:
; CHECK-PWR9: # %bb.0:
-; CHECK-PWR9-NEXT: vcmpgtud v4, v2, v3
-; CHECK-PWR9-NEXT: vsubudm v5, v2, v3
-; CHECK-PWR9-NEXT: vsubudm v2, v3, v2
-; CHECK-PWR9-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR9-NEXT: vminud v4, v2, v3
+; CHECK-PWR9-NEXT: vmaxud v2, v2, v3
+; CHECK-PWR9-NEXT: vsubudm v2, v2, v4
; CHECK-PWR9-NEXT: blr
;
; CHECK-PWR8-LABEL: absd_int64_ugt:
; CHECK-PWR8: # %bb.0:
-; CHECK-PWR8-NEXT: vcmpgtud v4, v2, v3
-; CHECK-PWR8-NEXT: vsubudm v5, v2, v3
-; CHECK-PWR8-NEXT: vsubudm v2, v3, v2
-; CHECK-PWR8-NEXT: xxsel v2, v2, v5, v4
+; CHECK-PWR8-NEXT: vminud v4, v2, v3
+; CHECK-PWR8-NEXT: vmaxud v2, v2, v3
+; CHECK-PWR8-NEXT: vsubudm v2, v2, v4
; CHECK-PWR8-NEXT: blr
;
; CHECK-PWR7-LABEL: absd_int64_ugt:
; CHECK-PWR7: # %bb.0:
-; CHECK-PWR7-NEXT: addi r3, r1, -64
-; CHECK-PWR7-NEXT: addi r4, r1, -80
-; CHECK-PWR7-NEXT: li r5, 0
-; CHECK-PWR7-NEXT: li r6, -1
+; CHECK-PWR7-NEXT: addi r3, r1, -96
+; CHECK-PWR7-NEXT: stxvd2x v2, 0, r3
+; CHECK-PWR7-NEXT: addi r3, r1, -80
; CHECK-PWR7-NEXT: stxvd2x v3, 0, r3
-; CHECK-PWR7-NEXT: stxvd2x v2, 0, r4
-; CHECK-PWR7-NEXT: addi r9, r1, -16
-; CHECK-PWR7-NEXT: ld r3, -56(r1)
+; CHECK-PWR7-NEXT: ld r3, -88(r1)
; CHECK-PWR7-NEXT: ld r4, -72(r1)
-; CHECK-PWR7-NEXT: ld r8, -80(r1)
-; CHECK-PWR7-NEXT: cmpld r4, r3
-; CHECK-PWR7-NEXT: iselgt r7, r6, r5
-; CHECK-PWR7-NEXT: std r7, -8(r1)
-; CHECK-PWR7-NEXT: ld r7, -64(r1)
-; CHECK-PWR7-NEXT: cmpld r8, r7
-; CHECK-PWR7-NEXT: iselgt r5, r6, r5
-; CHECK-PWR7-NEXT: std r5, -16(r1)
-; CHECK-PWR7-NEXT: sub r5, r4, r3
+; CHECK-PWR7-NEXT: ld r6, -80(r1)
+; CHECK-PWR7-NEXT: sub r5, r3, r4
+; CHECK-PWR7-NEXT: cmpld r3, r4
+; CHECK-PWR7-NEXT: li r3, 0
+; CHECK-PWR7-NEXT: li r4, -1
+; CHECK-PWR7-NEXT: std r5, -56(r1)
+; CHECK-PWR7-NEXT: ld r5, -96(r1)
+; CHECK-PWR7-NEXT: sub r7, r5, r6
+; CHECK-PWR7-NEXT: std r7, -64(r1)
+; CHECK-PWR7-NEXT: iselgt r7, r4, r3
+; CHECK-PWR7-NEXT: cmpld r5, r6
+; CHECK-PWR7-NEXT: std r7, -40(r1)
+; CHECK-PWR7-NEXT: iselgt r3, r4, r3
+; CHECK-PWR7-NEXT: addi r4, r1, -64
+; CHECK-PWR7-NEXT: std r3, -48(r1)
+; CHECK-PWR7-NEXT: lxvw4x vs0, 0, r4
+; CHECK-PWR7-NEXT: addi r4, r1, -48
+; CHECK-PWR7-NEXT: lxvw4x vs1, 0, r4
+; CHECK-PWR7-NEXT: addi r4, r1, -32
+; CHECK-PWR7-NEXT: xxlxor vs0, vs0, vs1
+; CHECK-PWR7-NEXT: stxvw4x vs0, 0, r4
+; CHECK-PWR7-NEXT: ld r4, -24(r1)
+; CHECK-PWR7-NEXT: sub r4, r7, r4
+; CHECK-PWR7-NEXT: std r4, -8(r1)
+; CHECK-PWR7-NEXT: ld r4, -32(r1)
; CHECK-PWR7-NEXT: sub r3, r3, r4
-; CHECK-PWR7-NEXT: lxvd2x v2, 0, r9
-; CHECK-PWR7-NEXT: std r5, -40(r1)
-; CHECK-PWR7-NEXT: sub r5, r8, r7
-; CHECK-PWR7-NEXT: std r5, -48(r1)
-; CHECK-PWR7-NEXT: addi r5, r1, -48
-; CHECK-PWR7-NEXT: lxvd2x v3, 0, r5
-; CHECK-PWR7-NEXT: std r3, -24(r1)
-; CHECK-PWR7-NEXT: sub r3, r7, r8
-; CHECK-PWR7-NEXT: std r3, -32(r1)
-; CHECK-PWR7-NEXT: addi r3, r1, -32
-; CHECK-PWR7-NEXT: lxvd2x v4, 0, r3
-; CHECK-PWR7-NEXT: xxsel v2, v4, v3, v2
+; CHECK-PWR7-NEXT: std r3, -16(r1)
+; CHECK-PWR7-NEXT: addi r3, r1, -16
+; CHECK-PWR7-NEXT: lxvd2x v2, 0, r3
; CHECK-PWR7-NEXT: blr
%3 = icmp ugt <2 x i64> %0, %1
%4 = sub <2 x i64> %0, %1
diff --git a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
index 787b81f7f2098..32c28148df32e 100644
--- a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
@@ -1,31 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le -mcpu=pwr9 < %s | FileCheck %s
+; Widen to <16 x i8>
define <12 x i8> @zext_abdu(<12 x i8> %a, <12 x i8> %b) {
; CHECK-LABEL: zext_abdu:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
-; CHECK-NEXT: xxlxor 36, 36, 36
-; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l
-; CHECK-NEXT: lxv 37, 0(3)
-; CHECK-NEXT: addis 3, 2, .LCPI0_1@toc@ha
-; CHECK-NEXT: addi 3, 3, .LCPI0_1@toc@l
-; CHECK-NEXT: lxv 33, 0(3)
-; CHECK-NEXT: addis 3, 2, .LCPI0_2@toc@ha
-; CHECK-NEXT: vperm 0, 4, 2, 5
-; CHECK-NEXT: vperm 5, 4, 3, 5
-; CHECK-NEXT: addi 3, 3, .LCPI0_2@toc@l
-; CHECK-NEXT: lxv 39, 0(3)
-; CHECK-NEXT: vperm 6, 4, 2, 1
-; CHECK-NEXT: vperm 1, 4, 3, 1
-; CHECK-NEXT: vperm 2, 4, 2, 7
-; CHECK-NEXT: vperm 3, 4, 3, 7
-; CHECK-NEXT: vabsduw 4, 5, 0
-; CHECK-NEXT: vabsduw 2, 3, 2
-; CHECK-NEXT: vabsduw 3, 1, 6
-; CHECK-NEXT: vpkuwum 3, 4, 3
-; CHECK-NEXT: vpkuwum 2, 2, 2
-; CHECK-NEXT: vpkuhum 2, 2, 3
+; CHECK-NEXT: vabsdub 2, 2, 3
; CHECK-NEXT: blr
entry:
%aa = zext <12 x i8> %a to <12 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index ddbfbd0b59fa4..5e610c453e1ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -26,13 +26,9 @@ define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
; CHECK-LABEL: sabd_b_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v12, v10, -1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
-; CHECK-NEXT: vmin.vv v10, v12, v8
-; CHECK-NEXT: vmax.vv v8, v12, v8
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vmxor.mm v0, v0, v8
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
%a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -158,13 +154,9 @@ define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
; CHECK-LABEL: uabd_b_promoted_ops:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vminu.vv v10, v12, v8
-; CHECK-NEXT: vmaxu.vv v8, v12, v8
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vmxor.mm v0, v0, v8
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: ret
%a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
%b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
diff --git a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
index cfbecd14604a2..eb29d730e83b3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll
@@ -22,6 +22,7 @@ define arm_aapcs_vfpcc <8 x i8> @vabd_v8s8(<8 x i8> %src1, <8 x i8> %src2) {
; CHECK-NEXT: vmovlb.s8 q1, q1
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vabd.s16 q0, q0, q1
+; CHECK-NEXT: vmovlb.u8 q0, q0
; CHECK-NEXT: bx lr
%sextsrc1 = sext <8 x i8> %src1 to <8 x i16>
%sextsrc2 = sext <8 x i8> %src2 to <8 x i16>
@@ -41,6 +42,8 @@ define arm_aapcs_vfpcc <4 x i8> @vabd_v4s8(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vabd.s32 q0, q0, q1
+; CHECK-NEXT: vmov.i32 q1, #0xff
+; CHECK-NEXT: vand q0, q0, q1
; CHECK-NEXT: bx lr
%sextsrc1 = sext <4 x i8> %src1 to <4 x i16>
%sextsrc2 = sext <4 x i8> %src2 to <4 x i16>
@@ -73,6 +76,7 @@ define arm_aapcs_vfpcc <4 x i16> @vabd_v4s16(<4 x i16> %src1, <4 x i16> %src2) {
; CHECK-NEXT: vmovlb.s16 q1, q1
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: vabd.s32 q0, q0, q1
+; CHECK-NEXT: vmovlb.u16 q0, q0
; CHECK-NEXT: bx lr
%sextsrc1 = sext <4 x i16> %src1 to <4 x i32>
%sextsrc2 = sext <4 x i16> %src2 to <4 x i32>
@@ -104,24 +108,21 @@ define arm_aapcs_vfpcc <2 x i32> @vabd_v2s32(<2 x i32> %src1, <2 x i32> %src2) {
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov.i64 q1, #0xffffffff
; CHECK-NEXT: asrs r1, r0, #31
; CHECK-NEXT: subs r0, r0, r2
; CHECK-NEXT: sbc.w r1, r1, r2, asr #31
; CHECK-NEXT: eor.w r0, r0, r1, asr #31
-; CHECK-NEXT: eor.w r2, r1, r1, asr #31
-; CHECK-NEXT: subs.w r0, r0, r1, asr #31
-; CHECK-NEXT: sbc.w r12, r2, r1, asr #31
-; CHECK-NEXT: vmov r2, s0
-; CHECK-NEXT: vmov r1, s4
-; CHECK-NEXT: asrs r3, r2, #31
-; CHECK-NEXT: subs r2, r2, r1
-; CHECK-NEXT: sbc.w r1, r3, r1, asr #31
-; CHECK-NEXT: eor.w r2, r2, r1, asr #31
-; CHECK-NEXT: subs.w r2, r2, r1, asr #31
-; CHECK-NEXT: vmov q0[2], q0[0], r2, r0
-; CHECK-NEXT: eor.w r0, r1, r1, asr #31
-; CHECK-NEXT: sbc.w r0, r0, r1, asr #31
-; CHECK-NEXT: vmov q0[3], q0[1], r0, r12
+; CHECK-NEXT: sub.w r0, r0, r1, asr #31
+; CHECK-NEXT: vmov r1, s0
+; CHECK-NEXT: asrs r2, r1, #31
+; CHECK-NEXT: subs r1, r1, r3
+; CHECK-NEXT: sbc.w r2, r2, r3, asr #31
+; CHECK-NEXT: eor.w r1, r1, r2, asr #31
+; CHECK-NEXT: sub.w r1, r1, r2, asr #31
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT: vand q0, q0, q1
; CHECK-NEXT: bx lr
%sextsrc1 = sext <2 x i32> %src1 to <2 x i64>
%sextsrc2 = sext <2 x i32> %src2 to <2 x i64>
@@ -171,8 +172,7 @@ define arm_aapcs_vfpcc <4 x i8> @vabd_v4u8(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-NEXT: vmov.i32 q2, #0xff
; CHECK-NEXT: vand q1, q1, q2
; CHECK-NEXT: vand q0, q0, q2
-; CHECK-NEXT: vsub.i32 q0, q0, q1
-; CHECK-NEXT: vabs.s32 q0, q0
+; CHECK-NEXT: vabd.u32 q0, q0, q1
; CHECK-NEXT: bx lr
%zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
%zextsrc2 = zext <4 x i8> %src2 to <4 x i16>
@@ -249,30 +249,47 @@ define arm_aapcs_vfpcc <4 x i32> @vabd_v4u32(<4 x i32> %src1, <4 x i32> %src2) {
define arm_aapcs_vfpcc <2 x i32> @vabd_v2u32(<2 x i32> %src1, <2 x i32> %src2) {
; CHECK-LABEL: vabd_v2u32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r7, lr}
-; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT: .pad #12
+; CHECK-NEXT: sub sp, #12
; CHECK-NEXT: vmov.i64 q2, #0xffffffff
+; CHECK-NEXT: movs r4, #0
; CHECK-NEXT: vand q1, q1, q2
; CHECK-NEXT: vand q0, q0, q2
-; CHECK-NEXT: vmov r0, r1, d3
-; CHECK-NEXT: vmov r2, r3, d1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbc.w r1, r3, r1
-; CHECK-NEXT: eor.w r0, r0, r1, asr #31
-; CHECK-NEXT: eor.w r2, r1, r1, asr #31
-; CHECK-NEXT: subs.w lr, r0, r1, asr #31
-; CHECK-NEXT: sbc.w r12, r2, r1, asr #31
-; CHECK-NEXT: vmov r2, r3, d2
-; CHECK-NEXT: vmov r1, r0, d0
-; CHECK-NEXT: subs r1, r1, r2
-; CHECK-NEXT: sbcs r0, r3
-; CHECK-NEXT: eor.w r1, r1, r0, asr #31
-; CHECK-NEXT: subs.w r1, r1, r0, asr #31
-; CHECK-NEXT: vmov q0[2], q0[0], r1, lr
-; CHECK-NEXT: eor.w r1, r0, r0, asr #31
-; CHECK-NEXT: sbc.w r0, r1, r0, asr #31
-; CHECK-NEXT: vmov q0[3], q0[1], r0, r12
-; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: vmov r1, r0, d3
+; CHECK-NEXT: vmov r11, r10, d1
+; CHECK-NEXT: vmov r7, r6, d2
+; CHECK-NEXT: vmov r5, r3, d0
+; CHECK-NEXT: subs.w r12, r11, r1
+; CHECK-NEXT: strd r1, r0, [sp, #4] @ 8-byte Folded Spill
+; CHECK-NEXT: sbc.w lr, r10, r0
+; CHECK-NEXT: rsbs.w r9, r12, #0
+; CHECK-NEXT: sbc.w r0, r4, lr
+; CHECK-NEXT: subs r2, r5, r7
+; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT: sbc.w r0, r3, r6
+; CHECK-NEXT: rsbs r1, r2, #0
+; CHECK-NEXT: vmov q1[2], q1[0], r2, r12
+; CHECK-NEXT: sbc.w r8, r4, r0
+; CHECK-NEXT: vmov q1[3], q1[1], r0, lr
+; CHECK-NEXT: subs r0, r7, r5
+; CHECK-NEXT: vmov q0[2], q0[0], r1, r9
+; CHECK-NEXT: sbcs.w r0, r6, r3
+; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
+; CHECK-NEXT: csetm r0, lo
+; CHECK-NEXT: bfi r4, r0, #0, #8
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vmov q0[3], q0[1], r8, r1
+; CHECK-NEXT: subs.w r0, r0, r11
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: sbcs.w r0, r0, r10
+; CHECK-NEXT: csetm r0, lo
+; CHECK-NEXT: bfi r4, r0, #8, #8
+; CHECK-NEXT: vmsr p0, r4
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: add sp, #12
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
%zextsrc1 = zext <2 x i32> %src1 to <2 x i64>
%zextsrc2 = zext <2 x i32> %src2 to <2 x i64>
%add1 = sub <2 x i64> %zextsrc1, %zextsrc2
diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll
index a80339427984a..627fe99543497 100644
--- a/llvm/test/CodeGen/X86/abds.ll
+++ b/llvm/test/CodeGen/X86/abds.ll
@@ -282,27 +282,29 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; X86-LABEL: abd_ext_i64:
; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sarl $31, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: sbbl %esi, %edx
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: sbbl %edi, %esi
-; X86-NEXT: sbbl %edi, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovll %ebp, %eax
+; X86-NEXT: cmovll %ebx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: abd_ext_i64:
@@ -324,27 +326,29 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; X86-LABEL: abd_ext_i64_undef:
; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sarl $31, %edi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: sbbl %esi, %edx
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: sbbl %edi, %esi
-; X86-NEXT: sbbl %edi, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovll %ebp, %eax
+; X86-NEXT: cmovll %ebx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: abd_ext_i64_undef:
@@ -458,24 +462,21 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmpl %eax, %ecx
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sbbl %edx, %edi
-; X86-NEXT: movl %edx, %edi
-; X86-NEXT: cmovll %esi, %edi
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: cmovll %ecx, %ebx
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: movl %edx, %ebp
-; X86-NEXT: sbbl %esi, %ebp
-; X86-NEXT: cmovll %esi, %edx
-; X86-NEXT: cmovll %ecx, %eax
-; X86-NEXT: subl %ebx, %eax
-; X86-NEXT: sbbl %edi, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovll %ebp, %eax
+; X86-NEXT: cmovll %ebx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
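For readers skimming the abds.ll diff: the new i686 sequences above compute the difference in both directions and then cmov on a signed compare, replacing the old sign-extension/xor trick. A minimal C++ model of what the generated code computes, a sketch only with illustrative names, not the actual TargetLowering::expandABD code:

  #include <cstdint>

  // Sketch: abds(a, b) == a < b ? b - a : a - b, with wrapping subtraction.
  int64_t abds_i64(int64_t a, int64_t b) {
    uint64_t diff = (uint64_t)a - (uint64_t)b; // the subl/sbbl pair above
    uint64_t neg = 0 - diff;                   // the negl/sbbl pair above
    return (int64_t)(a < b ? neg : diff);      // signed compare feeds cmovll
  }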
diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll
index 11719be4ab5cd..2a230cfc131f2 100644
--- a/llvm/test/CodeGen/X86/abdu.ll
+++ b/llvm/test/CodeGen/X86/abdu.ll
@@ -280,21 +280,29 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; X86-LABEL: abd_ext_i64:
; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl $0, %esi
-; X86-NEXT: sbbl %esi, %esi
-; X86-NEXT: sbbl %ecx, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovbl %ebp, %eax
+; X86-NEXT: cmovbl %ebx, %edx
; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: abd_ext_i64:
@@ -316,21 +324,29 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; X86-LABEL: abd_ext_i64_undef:
; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl $0, %esi
-; X86-NEXT: sbbl %esi, %esi
-; X86-NEXT: sbbl %ecx, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovbl %ebp, %eax
+; X86-NEXT: cmovbl %ebx, %edx
; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: abd_ext_i64_undef:
@@ -444,24 +460,21 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: cmpl %eax, %ecx
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: sbbl %edx, %edi
-; X86-NEXT: movl %edx, %edi
-; X86-NEXT: cmovbl %esi, %edi
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: cmovbl %ecx, %ebx
-; X86-NEXT: cmpl %ecx, %eax
-; X86-NEXT: movl %edx, %ebp
-; X86-NEXT: sbbl %esi, %ebp
-; X86-NEXT: cmovbl %esi, %edx
-; X86-NEXT: cmovbl %ecx, %eax
-; X86-NEXT: subl %ebx, %eax
-; X86-NEXT: sbbl %edi, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %ebp
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ecx, %ebx
+; X86-NEXT: sbbl %esi, %ebx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: sbbl %ebx, %edx
+; X86-NEXT: cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT: sbbl %ecx, %esi
+; X86-NEXT: cmovbl %ebp, %eax
+; X86-NEXT: cmovbl %ebx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
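The abdu.ll output follows the same shape with an unsigned compare, so the cmovll selects become cmovbl. The equivalent scalar model (again a sketch, not code from the patch):

  #include <cstdint>

  // Sketch: abdu(a, b) == a < b ? b - a : a - b, unsigned throughout.
  uint64_t abdu_i64(uint64_t a, uint64_t b) {
    return a < b ? b - a : a - b; // unsigned compare feeds cmovbl
  }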