[llvm] 13403a7 - [AArch64] Add lowerings for {ADD,SUB}CARRY and S{ADD,SUB}O_CARRY

Karl Meakin via llvm-commits <llvm-commits at lists.llvm.org>
Thu Apr 21 06:57:01 PDT 2022


Author: Karl Meakin
Date: 2022-04-21T14:56:37+01:00
New Revision: 13403a70e45b2d22878ba59fc211f8dba3a8deba

URL: https://github.com/llvm/llvm-project/commit/13403a70e45b2d22878ba59fc211f8dba3a8deba
DIFF: https://github.com/llvm/llvm-project/commit/13403a70e45b2d22878ba59fc211f8dba3a8deba.diff

LOG: [AArch64] Add lowerings for {ADD,SUB}CARRY and S{ADD,SUB}O_CARRY

Differential Revision: https://reviews.llvm.org/D123322
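
A minimal illustration of what these lowerings affect, taken from the updated u128_add test in llvm/test/CodeGen/AArch64/i128-math.ll: type legalization splits an i128 add into an ADDCARRY chain, which now lowers to ADCS. The in-code comment notes the lowering is inefficient and will be cleaned up by performAddSubCombine.

    define i128 @u128_add(i128 %x, i128 %y) {
      %1 = add i128 %x, %y
      ret i128 %1
    }

    adds x0, x0, x2   // low halves: add and set flags
    cset w8, hs       // materialize the carry-out of the low add as a 0/1 value
    cmp  w8, #1       // re-create the C flag from that value (C set iff w8 == 1)
    adcs x1, x1, x3   // high halves: add-with-carry consumes the C flag
    ret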

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/adc.ll
    llvm/test/CodeGen/AArch64/addcarry-crash.ll
    llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/arm64-vabs.ll
    llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
    llvm/test/CodeGen/AArch64/i128-math.ll
    llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
    llvm/test/CodeGen/AArch64/neg-abs.ll
    llvm/test/CodeGen/AArch64/neon-abd.ll
    llvm/test/CodeGen/AArch64/nzcv-save.ll
    llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
    llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/usub_sat_vec.ll
    llvm/test/CodeGen/AArch64/vec_uaddo.ll
    llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4a60896f69e59..b61f7d8144524 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -526,6 +526,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::UMULO, MVT::i32, Custom);
   setOperationAction(ISD::UMULO, MVT::i64, Custom);
 
+  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
+  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
+  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
+  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
+  setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
+  setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
+  setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
+  setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);
+
   setOperationAction(ISD::FSIN, MVT::f32, Expand);
   setOperationAction(ISD::FSIN, MVT::f64, Expand);
   setOperationAction(ISD::FCOS, MVT::f32, Expand);
@@ -3299,6 +3308,62 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
                      Op.getOperand(2));
 }
 
+// Sets 'C' bit of NZCV to 0 if value is 0, else sets 'C' bit to 1
+static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG) {
+  SDLoc DL(Value);
+  SDValue One = DAG.getConstant(1, DL, Value.getValueType());
+  SDValue Cmp =
+      DAG.getNode(AArch64ISD::SUBS, DL,
+                  DAG.getVTList(Value.getValueType(), MVT::Glue), Value, One);
+  return Cmp.getValue(1);
+}
+
+// Value is 1 if 'C' bit of NZCV is 1, else 0
+static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
+  assert(Flag.getResNo() == 1);
+  SDLoc DL(Flag);
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue CC = DAG.getConstant(AArch64CC::HS, DL, MVT::i32);
+  return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
+}
+
+// Value is 1 if 'V' bit of NZCV is 1, else 0
+static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
+  assert(Flag.getResNo() == 1);
+  SDLoc DL(Flag);
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
+  return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
+}
+
+// This lowering is inefficient, but it will get cleaned up by
+// `performAddSubCombine`
+static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
+                                bool IsSigned) {
+  EVT VT0 = Op.getValue(0).getValueType();
+  EVT VT1 = Op.getValue(1).getValueType();
+
+  if (VT0 != MVT::i32 && VT0 != MVT::i64)
+    return SDValue();
+
+  SDValue OpLHS = Op.getOperand(0);
+  SDValue OpRHS = Op.getOperand(1);
+  SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG);
+
+  SDLoc DL(Op);
+  SDVTList VTs = DAG.getVTList(VT0, VT1);
+
+  SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
+                            OpRHS, OpCarryIn);
+
+  SDValue OutFlag = IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
+                             : carryFlagToValue(Sum.getValue(1), VT1, DAG);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
+}
+
 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
   // Let legalize expand this if it isn't a legal type yet.
   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
@@ -5147,6 +5212,14 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::SUBC:
   case ISD::SUBE:
     return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+  case ISD::ADDCARRY:
+    return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
+  case ISD::SUBCARRY:
+    return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
+  case ISD::SADDO_CARRY:
+    return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
+  case ISD::SSUBO_CARRY:
+    return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
   case ISD::SADDO:
   case ISD::UADDO:
   case ISD::SSUBO:

diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index aa129da8db261..c52a796a11477 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -6,16 +6,24 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
 ; CHECK-LE-LABEL: test_simple:
 ; CHECK-LE:       ; %bb.0:
 ; CHECK-LE-NEXT:    adds x8, x0, x2
+; CHECK-LE-NEXT:    cset w9, hs
+; CHECK-LE-NEXT:    cmp w9, #1
 ; CHECK-LE-NEXT:    adcs x9, x1, x3
 ; CHECK-LE-NEXT:    subs x0, x8, x4
+; CHECK-LE-NEXT:    cset w8, lo
+; CHECK-LE-NEXT:    cmp w8, #1
 ; CHECK-LE-NEXT:    sbcs x1, x9, x5
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: test_simple:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    adds x8, x1, x3
+; CHECK-BE-NEXT:    cset w9, hs
+; CHECK-BE-NEXT:    cmp w9, #1
 ; CHECK-BE-NEXT:    adcs x9, x0, x2
 ; CHECK-BE-NEXT:    subs x1, x8, x5
+; CHECK-BE-NEXT:    cset w8, lo
+; CHECK-BE-NEXT:    cmp w8, #1
 ; CHECK-BE-NEXT:    sbcs x0, x9, x4
 ; CHECK-BE-NEXT:    ret
 
@@ -30,12 +38,16 @@ define i128 @test_imm(i128 %a) {
 ; CHECK-LE-LABEL: test_imm:
 ; CHECK-LE:       ; %bb.0:
 ; CHECK-LE-NEXT:    adds x0, x0, #12
+; CHECK-LE-NEXT:    cset w8, hs
+; CHECK-LE-NEXT:    cmp w8, #1
 ; CHECK-LE-NEXT:    adcs x1, x1, xzr
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: test_imm:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    adds x1, x1, #12
+; CHECK-BE-NEXT:    cset w8, hs
+; CHECK-BE-NEXT:    cmp w8, #1
 ; CHECK-BE-NEXT:    adcs x0, x0, xzr
 ; CHECK-BE-NEXT:    ret
 
@@ -47,15 +59,19 @@ define i128 @test_imm(i128 %a) {
 define i128 @test_shifted(i128 %a, i128 %b) {
 ; CHECK-LE-LABEL: test_shifted:
 ; CHECK-LE:       ; %bb.0:
-; CHECK-LE-NEXT:    extr x8, x3, x2, #19
 ; CHECK-LE-NEXT:    adds x0, x0, x2, lsl #45
+; CHECK-LE-NEXT:    extr x8, x3, x2, #19
+; CHECK-LE-NEXT:    cset w9, hs
+; CHECK-LE-NEXT:    cmp w9, #1
 ; CHECK-LE-NEXT:    adcs x1, x1, x8
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: test_shifted:
 ; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    extr x8, x2, x3, #19
 ; CHECK-BE-NEXT:    adds x1, x1, x3, lsl #45
+; CHECK-BE-NEXT:    extr x8, x2, x3, #19
+; CHECK-BE-NEXT:    cset w9, hs
+; CHECK-BE-NEXT:    cmp w9, #1
 ; CHECK-BE-NEXT:    adcs x0, x0, x8
 ; CHECK-BE-NEXT:    ret
 
@@ -74,6 +90,8 @@ define i128 @test_extended(i128 %a, i16 %b) {
 ; CHECK-LE-NEXT:    adds x0, x0, w2, sxth #3
 ; CHECK-LE-NEXT:    asr x9, x8, #63
 ; CHECK-LE-NEXT:    extr x8, x9, x8, #61
+; CHECK-LE-NEXT:    cset w9, hs
+; CHECK-LE-NEXT:    cmp w9, #1
 ; CHECK-LE-NEXT:    adcs x1, x1, x8
 ; CHECK-LE-NEXT:    ret
 ;
@@ -84,6 +102,8 @@ define i128 @test_extended(i128 %a, i16 %b) {
 ; CHECK-BE-NEXT:    adds x1, x1, w2, sxth #3
 ; CHECK-BE-NEXT:    asr x9, x8, #63
 ; CHECK-BE-NEXT:    extr x8, x9, x8, #61
+; CHECK-BE-NEXT:    cset w9, hs
+; CHECK-BE-NEXT:    cmp w9, #1
 ; CHECK-BE-NEXT:    adcs x0, x0, x8
 ; CHECK-BE-NEXT:    ret
 

diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index 91c7ee7292c63..e131ff2b13311 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -9,7 +9,9 @@ define i64 @foo(i64* nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unna
 ; CHECK-NEXT:    lsr x9, x1, #32
 ; CHECK-NEXT:    cmn x3, x2
 ; CHECK-NEXT:    mul x8, x8, x9
-; CHECK-NEXT:    cinc x0, x8, hs
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
+; CHECK-NEXT:    adcs x0, x8, xzr
 ; CHECK-NEXT:    ret
 entry:
   %0 = lshr i64 %a, 32

diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
index 2f8d06e133555..5785348779af2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -260,6 +260,8 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
 ; CHECK-NEXT:    adds x10, x9, x2
+; CHECK-NEXT:    cset w11, hs
+; CHECK-NEXT:    cmp w11, #1
 ; CHECK-NEXT:    adcs x11, x8, x3
 ; CHECK-NEXT:    stlxp w12, x10, x11, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB6_1
@@ -281,6 +283,8 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
 ; CHECK-NEXT:    subs x10, x9, x2
+; CHECK-NEXT:    cset w11, lo
+; CHECK-NEXT:    cmp w11, #1
 ; CHECK-NEXT:    sbcs x11, x8, x3
 ; CHECK-NEXT:    stlxp w12, x10, x11, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB7_1

diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index da48edc98d067..96bc43f5b801a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1749,27 +1749,35 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: uabd_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    fmov x10, d1
 ; CHECK-NEXT:    mov.d x8, v0[1]
-; CHECK-NEXT:    mov.d x10, v1[1]
-; CHECK-NEXT:    asr x12, x9, #63
-; CHECK-NEXT:    asr x13, x11, #63
-; CHECK-NEXT:    subs x9, x9, x11
-; CHECK-NEXT:    sbcs x11, x12, x13
-; CHECK-NEXT:    asr x12, x8, #63
-; CHECK-NEXT:    asr x13, x10, #63
-; CHECK-NEXT:    subs x8, x8, x10
-; CHECK-NEXT:    sbcs x10, x12, x13
-; CHECK-NEXT:    negs x12, x8
-; CHECK-NEXT:    ngcs x13, x10
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    csel x2, x12, x8, lt
-; CHECK-NEXT:    csel x3, x13, x10, lt
-; CHECK-NEXT:    negs x8, x9
-; CHECK-NEXT:    ngcs x10, x11
-; CHECK-NEXT:    cmp x11, #0
-; CHECK-NEXT:    csel x8, x8, x9, lt
-; CHECK-NEXT:    csel x1, x10, x11, lt
+; CHECK-NEXT:    mov.d x11, v1[1]
+; CHECK-NEXT:    subs x12, x9, x10
+; CHECK-NEXT:    asr x9, x9, #63
+; CHECK-NEXT:    cset w13, lo
+; CHECK-NEXT:    asr x10, x10, #63
+; CHECK-NEXT:    cmp w13, #1
+; CHECK-NEXT:    sbcs x9, x9, x10
+; CHECK-NEXT:    subs x10, x8, x11
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    cset w13, lo
+; CHECK-NEXT:    asr x11, x11, #63
+; CHECK-NEXT:    cmp w13, #1
+; CHECK-NEXT:    sbcs x8, x8, x11
+; CHECK-NEXT:    asr x11, x8, #63
+; CHECK-NEXT:    eor x10, x10, x11
+; CHECK-NEXT:    eor x8, x8, x11
+; CHECK-NEXT:    subs x2, x10, x11
+; CHECK-NEXT:    asr x10, x9, #63
+; CHECK-NEXT:    cset w13, lo
+; CHECK-NEXT:    eor x12, x12, x10
+; CHECK-NEXT:    cmp w13, #1
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    sbcs x3, x8, x11
+; CHECK-NEXT:    subs x8, x12, x10
+; CHECK-NEXT:    cset w11, lo
+; CHECK-NEXT:    cmp w11, #1
+; CHECK-NEXT:    sbcs x1, x9, x10
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov.d v0[1], x1
 ; CHECK-NEXT:    fmov x0, d0

diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
index 06be6209f023e..024d60a8d77d4 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
@@ -219,6 +219,8 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; NOLSE-NEXT:    ldr x8, [sp, #32] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x13, [sp, #24] // 8-byte Folded Reload
 ; NOLSE-NEXT:    adds x14, x8, #1
+; NOLSE-NEXT:    cset w9, hs
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    mov x9, xzr
 ; NOLSE-NEXT:    adcs x15, x11, x9
 ; NOLSE-NEXT:  .LBB4_2: // %atomicrmw.start
@@ -272,6 +274,8 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; LSE-NEXT:    ldr x8, [sp, #64] // 8-byte Folded Reload
 ; LSE-NEXT:    ldr x9, [sp, #56] // 8-byte Folded Reload
 ; LSE-NEXT:    adds x2, x8, #1
+; LSE-NEXT:    cset w11, hs
+; LSE-NEXT:    subs w11, w11, #1
 ; LSE-NEXT:    mov x11, xzr
 ; LSE-NEXT:    adcs x11, x10, x11
 ; LSE-NEXT:    // kill: def $x2 killed $x2 def $x2_x3

diff --git a/llvm/test/CodeGen/AArch64/i128-math.ll b/llvm/test/CodeGen/AArch64/i128-math.ll
index 380105ae1618b..a0ade45dab3c9 100644
--- a/llvm/test/CodeGen/AArch64/i128-math.ll
+++ b/llvm/test/CodeGen/AArch64/i128-math.ll
@@ -23,6 +23,8 @@ define i128 @u128_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x0, x0, x2
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    adcs x1, x1, x3
 ; CHECK-NEXT:    ret
   %1 = add i128 %x, %y
@@ -32,16 +34,12 @@ define i128 @u128_add(i128 %x, i128 %y) {
 define { i128, i8 } @u128_checked_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_checked_add:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, x2
-; CHECK-NEXT:    adcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    mov x0, x8
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    mov x1, x9
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    eor w2, w10, #0x1
+; CHECK-NEXT:    adds x0, x0, x2
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    adcs x1, x1, x3
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    eor w2, w8, #0x1
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.uadd.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -56,15 +54,11 @@ define { i128, i8 } @u128_checked_add(i128 %x, i128 %y) {
 define { i128, i8 } @u128_overflowing_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_overflowing_add:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, x2
-; CHECK-NEXT:    adcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    mov x0, x8
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    mov x1, x9
-; CHECK-NEXT:    csel w2, w10, w11, eq
+; CHECK-NEXT:    adds x0, x0, x2
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    adcs x1, x1, x3
+; CHECK-NEXT:    cset w2, hs
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.uadd.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -79,12 +73,10 @@ define i128 @u128_saturating_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_saturating_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, x2
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csinv x0, x8, xzr, eq
 ; CHECK-NEXT:    csinv x1, x9, xzr, eq
@@ -97,6 +89,8 @@ define i128 @u128_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    sbcs x1, x1, x3
 ; CHECK-NEXT:    ret
   %1 = sub i128 %x, %y
@@ -106,16 +100,12 @@ define i128 @u128_sub(i128 %x, i128 %y) {
 define { i128, i8 } @u128_checked_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_checked_sub:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, x2
-; CHECK-NEXT:    sbcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    mov x0, x8
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    mov x1, x9
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    eor w2, w10, #0x1
+; CHECK-NEXT:    subs x0, x0, x2
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    sbcs x1, x1, x3
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    eor w2, w8, #0x1
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -130,15 +120,11 @@ define { i128, i8 } @u128_checked_sub(i128 %x, i128 %y) {
 define { i128, i8 } @u128_overflowing_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_overflowing_sub:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, x2
-; CHECK-NEXT:    sbcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    mov x0, x8
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    mov x1, x9
-; CHECK-NEXT:    csel w2, w10, w11, eq
+; CHECK-NEXT:    subs x0, x0, x2
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    sbcs x1, x1, x3
+; CHECK-NEXT:    cset w2, hs
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -153,12 +139,10 @@ define i128 @u128_saturating_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: u128_saturating_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x2
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x1, x3
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x0, xzr, x8, ne
 ; CHECK-NEXT:    csel x1, xzr, x9, ne
@@ -171,6 +155,8 @@ define i128 @i128_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x0, x0, x2
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    adcs x1, x1, x3
 ; CHECK-NEXT:    ret
   %1 = add i128 %x, %y
@@ -181,12 +167,11 @@ define { i128, i8 } @i128_checked_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_checked_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x0, x0, x2
-; CHECK-NEXT:    eor x9, x1, x3
-; CHECK-NEXT:    adcs x8, x1, x3
-; CHECK-NEXT:    eor x10, x1, x8
-; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    bics xzr, x10, x9
-; CHECK-NEXT:    cset w2, ge
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    adcs x1, x1, x3
+; CHECK-NEXT:    cset w8, vs
+; CHECK-NEXT:    eor w2, w8, #0x1
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -202,12 +187,10 @@ define { i128, i8 } @i128_overflowing_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_overflowing_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x0, x0, x2
-; CHECK-NEXT:    eor x9, x1, x3
-; CHECK-NEXT:    adcs x8, x1, x3
-; CHECK-NEXT:    eor x10, x1, x8
-; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    bics xzr, x10, x9
-; CHECK-NEXT:    cset w2, lt
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    adcs x1, x1, x3
+; CHECK-NEXT:    cset w2, vs
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -222,14 +205,15 @@ define i128 @i128_saturating_add(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_saturating_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, x2
-; CHECK-NEXT:    eor x11, x1, x3
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x1, x3
 ; CHECK-NEXT:    asr x10, x9, #63
-; CHECK-NEXT:    eor x12, x1, x9
-; CHECK-NEXT:    bics xzr, x12, x11
+; CHECK-NEXT:    cset w11, vs
+; CHECK-NEXT:    cmp w11, #0
 ; CHECK-NEXT:    eor x11, x10, #0x8000000000000000
-; CHECK-NEXT:    csel x0, x10, x8, lt
-; CHECK-NEXT:    csel x1, x11, x9, lt
+; CHECK-NEXT:    csel x0, x10, x8, ne
+; CHECK-NEXT:    csel x1, x11, x9, ne
 ; CHECK-NEXT:    ret
   %1 = tail call i128 @llvm.sadd.sat.i128(i128 %x, i128 %y)
   ret i128 %1
@@ -239,6 +223,8 @@ define i128 @i128_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    sbcs x1, x1, x3
 ; CHECK-NEXT:    ret
   %1 = sub i128 %x, %y
@@ -249,12 +235,11 @@ define { i128, i8 } @i128_checked_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_checked_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
-; CHECK-NEXT:    eor x9, x1, x3
-; CHECK-NEXT:    sbcs x8, x1, x3
-; CHECK-NEXT:    eor x10, x1, x8
-; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    tst x9, x10
-; CHECK-NEXT:    cset w2, ge
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    sbcs x1, x1, x3
+; CHECK-NEXT:    cset w8, vs
+; CHECK-NEXT:    eor w2, w8, #0x1
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.ssub.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -270,12 +255,10 @@ define { i128, i8 } @i128_overflowing_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_overflowing_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x0, x0, x2
-; CHECK-NEXT:    eor x9, x1, x3
-; CHECK-NEXT:    sbcs x8, x1, x3
-; CHECK-NEXT:    eor x10, x1, x8
-; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    tst x9, x10
-; CHECK-NEXT:    cset w2, lt
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w8, #1
+; CHECK-NEXT:    sbcs x1, x1, x3
+; CHECK-NEXT:    cset w2, vs
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.ssub.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
@@ -290,14 +273,15 @@ define i128 @i128_saturating_sub(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_saturating_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x2
-; CHECK-NEXT:    eor x11, x1, x3
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x1, x3
 ; CHECK-NEXT:    asr x10, x9, #63
-; CHECK-NEXT:    eor x12, x1, x9
-; CHECK-NEXT:    tst x11, x12
+; CHECK-NEXT:    cset w11, vs
+; CHECK-NEXT:    cmp w11, #0
 ; CHECK-NEXT:    eor x11, x10, #0x8000000000000000
-; CHECK-NEXT:    csel x0, x10, x8, lt
-; CHECK-NEXT:    csel x1, x11, x9, lt
+; CHECK-NEXT:    csel x0, x10, x8, ne
+; CHECK-NEXT:    csel x1, x11, x9, ne
 ; CHECK-NEXT:    ret
   %1 = tail call i128 @llvm.ssub.sat.i128(i128 %x, i128 %y)
   ret i128 %1

diff --git a/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
index 368246dbb2426..3c3e9b1d3f313 100644
--- a/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-shift-opt.ll
@@ -11,6 +11,8 @@ define i128 @opt_setcc_lt_power_of_2(i128 %a) nounwind {
 ; CHECK-NEXT:  .LBB0_1: // %loop
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    adds x0, x0, #1
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    adcs x1, x1, xzr
 ; CHECK-NEXT:    orr x8, x1, x0, lsr #60
 ; CHECK-NEXT:    cbnz x8, .LBB0_1

diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll
index 7f691c9b694c4..3a09972ae6506 100644
--- a/llvm/test/CodeGen/AArch64/neg-abs.ll
+++ b/llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -52,6 +52,8 @@ define i128 @neg_abs128(i128 %x) {
 ; CHECK-NEXT:    eor x9, x0, x8
 ; CHECK-NEXT:    eor x10, x1, x8
 ; CHECK-NEXT:    subs x0, x8, x9
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x1, x8, x10
 ; CHECK-NEXT:    ret
   %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
@@ -95,11 +97,13 @@ define i16 @abs16(i16 %x) {
 define i128 @abs128(i128 %x) {
 ; CHECK-LABEL: abs128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    negs x8, x0
-; CHECK-NEXT:    ngcs x9, x1
-; CHECK-NEXT:    cmp x1, #0
-; CHECK-NEXT:    csel x0, x8, x0, lt
-; CHECK-NEXT:    csel x1, x9, x1, lt
+; CHECK-NEXT:    asr x8, x1, #63
+; CHECK-NEXT:    eor x9, x0, x8
+; CHECK-NEXT:    eor x10, x1, x8
+; CHECK-NEXT:    subs x0, x9, x8
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
+; CHECK-NEXT:    sbcs x1, x10, x8
 ; CHECK-NEXT:    ret
   %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
   ret i128 %abs

diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll
index 0279c832391ed..117fca8ddbf02 100644
--- a/llvm/test/CodeGen/AArch64/neon-abd.ll
+++ b/llvm/test/CodeGen/AArch64/neon-abd.ll
@@ -147,21 +147,27 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK-NEXT:    mov x8, v0.d[1]
 ; CHECK-NEXT:    fmov x10, d0
 ; CHECK-NEXT:    mov x9, v1.d[1]
-; CHECK-NEXT:    asr x11, x10, #63
-; CHECK-NEXT:    asr x12, x8, #63
-; CHECK-NEXT:    asr x13, x9, #63
-; CHECK-NEXT:    subs x8, x8, x9
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    sbcs x12, x12, x13
-; CHECK-NEXT:    asr x13, x9, #63
-; CHECK-NEXT:    subs x9, x10, x9
-; CHECK-NEXT:    sbcs x10, x11, x13
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    cneg x9, x9, lt
-; CHECK-NEXT:    cmp x12, #0
-; CHECK-NEXT:    cneg x8, x8, lt
-; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    subs x11, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    cset w12, lo
+; CHECK-NEXT:    asr x9, x9, #63
+; CHECK-NEXT:    cmp w12, #1
+; CHECK-NEXT:    fmov x12, d1
+; CHECK-NEXT:    sbcs x8, x8, x9
+; CHECK-NEXT:    asr x8, x8, #63
+; CHECK-NEXT:    subs x9, x10, x12
+; CHECK-NEXT:    asr x10, x10, #63
+; CHECK-NEXT:    cset w13, lo
+; CHECK-NEXT:    asr x12, x12, #63
+; CHECK-NEXT:    cmp w13, #1
+; CHECK-NEXT:    eor x11, x11, x8
+; CHECK-NEXT:    sbcs x10, x10, x12
+; CHECK-NEXT:    sub x8, x11, x8
+; CHECK-NEXT:    asr x10, x10, #63
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    sub x9, x9, x10
 ; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
   %a.sext = sext <2 x i64> %a to <2 x i128>
@@ -326,16 +332,22 @@ define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK-NEXT:    fmov x10, d0
 ; CHECK-NEXT:    mov x9, v1.d[1]
 ; CHECK-NEXT:    subs x8, x8, x9
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    fmov x9, d1
 ; CHECK-NEXT:    ngcs x11, xzr
+; CHECK-NEXT:    asr x11, x11, #63
 ; CHECK-NEXT:    subs x9, x10, x9
+; CHECK-NEXT:    eor x8, x8, x11
+; CHECK-NEXT:    cset w10, lo
+; CHECK-NEXT:    sub x8, x8, x11
+; CHECK-NEXT:    cmp w10, #1
 ; CHECK-NEXT:    ngcs x10, xzr
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    cneg x9, x9, lt
-; CHECK-NEXT:    cmp x11, #0
-; CHECK-NEXT:    cneg x8, x8, lt
-; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    asr x10, x10, #63
 ; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    eor x9, x9, x10
+; CHECK-NEXT:    sub x9, x9, x10
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
   %a.zext = zext <2 x i64> %a to <2 x i128>

diff --git a/llvm/test/CodeGen/AArch64/nzcv-save.ll b/llvm/test/CodeGen/AArch64/nzcv-save.ll
index 9f7b3d3fa42c1..5f320bf484985 100644
--- a/llvm/test/CodeGen/AArch64/nzcv-save.ll
+++ b/llvm/test/CodeGen/AArch64/nzcv-save.ll
@@ -6,22 +6,27 @@
 define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldp x9, x8, [x2]
-; CHECK-NEXT:    ldp x11, x10, [x3]
-; CHECK-NEXT:    adds x9, x9, x11
-; CHECK-NEXT:    ldp x12, x11, [x2, #16]
-; CHECK-NEXT:    adcs x8, x8, x10
-; CHECK-NEXT:    ldp x13, x10, [x3, #16]
-; CHECK-NEXT:    adcs x12, x12, x13
-; CHECK-NEXT:    mrs x13, NZCV
-; CHECK-NEXT:    adcs x14, x11, x10
-; CHECK-NEXT:    orr x11, x11, #0x100
-; CHECK-NEXT:    msr NZCV, x13
-; CHECK-NEXT:    stp x9, x8, [x0]
-; CHECK-NEXT:    adcs x10, x11, x10
-; CHECK-NEXT:    stp x12, x14, [x0, #16]
-; CHECK-NEXT:    stp x9, x8, [x1]
-; CHECK-NEXT:    stp x12, x10, [x1, #16]
+; CHECK-NEXT:    ldp x8, x10, [x2]
+; CHECK-NEXT:    ldp x9, x11, [x3]
+; CHECK-NEXT:    ldp x12, x13, [x2, #16]
+; CHECK-NEXT:    adds x8, x8, x9
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
+; CHECK-NEXT:    adcs x10, x10, x11
+; CHECK-NEXT:    orr x14, x13, #0x100
+; CHECK-NEXT:    cset w11, hs
+; CHECK-NEXT:    cmp w11, #1
+; CHECK-NEXT:    ldp x9, x11, [x3, #16]
+; CHECK-NEXT:    stp x8, x10, [x0]
+; CHECK-NEXT:    adcs x9, x12, x9
+; CHECK-NEXT:    cset w12, hs
+; CHECK-NEXT:    cmp w12, #1
+; CHECK-NEXT:    adcs x13, x13, x11
+; CHECK-NEXT:    cmp w12, #1
+; CHECK-NEXT:    adcs x11, x14, x11
+; CHECK-NEXT:    stp x9, x13, [x0, #16]
+; CHECK-NEXT:    stp x8, x10, [x1]
+; CHECK-NEXT:    stp x9, x11, [x1, #16]
 ; CHECK-NEXT:    ret
 entry:
   %c = load i256, i256* %cc

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 917f146890266..04fb33e3ce5de 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -351,23 +351,25 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-LABEL: v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x2, x6
-; CHECK-NEXT:    eor x10, x3, x7
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x3, x7
-; CHECK-NEXT:    eor x11, x3, x9
-; CHECK-NEXT:    asr x12, x9, #63
-; CHECK-NEXT:    bics xzr, x11, x10
-; CHECK-NEXT:    eor x10, x1, x5
-; CHECK-NEXT:    csel x2, x12, x8, lt
-; CHECK-NEXT:    eor x8, x12, #0x8000000000000000
-; CHECK-NEXT:    csel x3, x8, x9, lt
+; CHECK-NEXT:    cset w10, vs
+; CHECK-NEXT:    asr x11, x9, #63
+; CHECK-NEXT:    cmp w10, #0
+; CHECK-NEXT:    csel x2, x11, x8, ne
+; CHECK-NEXT:    eor x8, x11, #0x8000000000000000
+; CHECK-NEXT:    csel x3, x8, x9, ne
 ; CHECK-NEXT:    adds x8, x0, x4
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x1, x5
-; CHECK-NEXT:    eor x11, x1, x9
-; CHECK-NEXT:    asr x12, x9, #63
-; CHECK-NEXT:    bics xzr, x11, x10
-; CHECK-NEXT:    eor x10, x12, #0x8000000000000000
-; CHECK-NEXT:    csel x8, x12, x8, lt
-; CHECK-NEXT:    csel x1, x10, x9, lt
+; CHECK-NEXT:    cset w10, vs
+; CHECK-NEXT:    asr x11, x9, #63
+; CHECK-NEXT:    cmp w10, #0
+; CHECK-NEXT:    eor x10, x11, #0x8000000000000000
+; CHECK-NEXT:    csel x8, x11, x8, ne
+; CHECK-NEXT:    csel x1, x10, x9, ne
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index 74c7a55c8be73..d40109f77fa96 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -354,23 +354,25 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-LABEL: v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x2, x6
-; CHECK-NEXT:    eor x10, x3, x7
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x3, x7
-; CHECK-NEXT:    eor x11, x3, x9
-; CHECK-NEXT:    asr x12, x9, #63
-; CHECK-NEXT:    tst x10, x11
-; CHECK-NEXT:    eor x10, x1, x5
-; CHECK-NEXT:    csel x2, x12, x8, lt
-; CHECK-NEXT:    eor x8, x12, #0x8000000000000000
-; CHECK-NEXT:    csel x3, x8, x9, lt
+; CHECK-NEXT:    cset w10, vs
+; CHECK-NEXT:    asr x11, x9, #63
+; CHECK-NEXT:    cmp w10, #0
+; CHECK-NEXT:    csel x2, x11, x8, ne
+; CHECK-NEXT:    eor x8, x11, #0x8000000000000000
+; CHECK-NEXT:    csel x3, x8, x9, ne
 ; CHECK-NEXT:    subs x8, x0, x4
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x1, x5
-; CHECK-NEXT:    eor x11, x1, x9
-; CHECK-NEXT:    asr x12, x9, #63
-; CHECK-NEXT:    tst x10, x11
-; CHECK-NEXT:    eor x10, x12, #0x8000000000000000
-; CHECK-NEXT:    csel x8, x12, x8, lt
-; CHECK-NEXT:    csel x1, x10, x9, lt
+; CHECK-NEXT:    cset w10, vs
+; CHECK-NEXT:    asr x11, x9, #63
+; CHECK-NEXT:    cmp w10, #0
+; CHECK-NEXT:    eor x10, x11, #0x8000000000000000
+; CHECK-NEXT:    csel x8, x11, x8, ne
+; CHECK-NEXT:    csel x1, x10, x9, ne
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index 46dd3db9e97fe..924cc4bee0d3d 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -349,22 +349,18 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-LABEL: v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x2, x6
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x3, x7
-; CHECK-NEXT:    cmp x8, x2
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x3
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csinv x3, x9, xzr, eq
 ; CHECK-NEXT:    csinv x2, x8, xzr, eq
+; CHECK-NEXT:    csinv x3, x9, xzr, eq
 ; CHECK-NEXT:    adds x8, x0, x4
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x1, x5
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csinv x8, x8, xzr, eq
 ; CHECK-NEXT:    csinv x1, x9, xzr, eq

diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 666e5a6134330..ed1a138ed7a5b 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -345,22 +345,18 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-LABEL: v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x2, x6
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x3, x7
-; CHECK-NEXT:    cmp x8, x2
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x9, x3
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csel x3, xzr, x9, ne
 ; CHECK-NEXT:    csel x2, xzr, x8, ne
+; CHECK-NEXT:    csel x3, xzr, x9, ne
 ; CHECK-NEXT:    subs x8, x0, x4
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    sbcs x9, x1, x5
-; CHECK-NEXT:    cmp x8, x0
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, ne
 ; CHECK-NEXT:    csel x1, xzr, x9, ne

diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
index 516f0297b462e..635c0a36feed7 100644
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -277,19 +277,15 @@ define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; CHECK-LABEL: uaddo_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x2, x6
+; CHECK-NEXT:    cset w9, hs
+; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    adcs x9, x3, x7
-; CHECK-NEXT:    cmp x8, x2
-; CHECK-NEXT:    cset w10, lo
-; CHECK-NEXT:    cmp x9, x3
-; CHECK-NEXT:    cset w11, lo
-; CHECK-NEXT:    csel w10, w10, w11, eq
+; CHECK-NEXT:    cset w10, hs
 ; CHECK-NEXT:    adds x11, x0, x4
+; CHECK-NEXT:    cset w12, hs
+; CHECK-NEXT:    cmp w12, #1
 ; CHECK-NEXT:    adcs x12, x1, x5
-; CHECK-NEXT:    cmp x11, x0
-; CHECK-NEXT:    cset w13, lo
-; CHECK-NEXT:    cmp x12, x1
-; CHECK-NEXT:    cset w14, lo
-; CHECK-NEXT:    csel w13, w13, w14, eq
+; CHECK-NEXT:    cset w13, hs
 ; CHECK-NEXT:    fmov s0, w13
 ; CHECK-NEXT:    mov v0.s[1], w10
 ; CHECK-NEXT:    ldr x10, [sp]

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
index 36ec218e4e20c..df18e72a5f16d 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add-legalization.ll
@@ -149,6 +149,8 @@ define i128 @test_v2i128(<2 x i128> %a) nounwind {
 ; CHECK-LABEL: test_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x0, x0, x2
+; CHECK-NEXT:    cset w8, hs
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    adcs x1, x1, x3
 ; CHECK-NEXT:    ret
   %b = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
