[llvm] ef774be - [AArch64] Support SETCCCARRY lowering

Filipp Zhinkin via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 14 12:29:43 PDT 2022


Author: Filipp Zhinkin
Date: 2022-10-14T22:29:31+03:00
New Revision: ef774bec63b72a0e900b3ddf22da81efbecd99a9

URL: https://github.com/llvm/llvm-project/commit/ef774bec63b72a0e900b3ddf22da81efbecd99a9
DIFF: https://github.com/llvm/llvm-project/commit/ef774bec63b72a0e900b3ddf22da81efbecd99a9.diff

LOG: [AArch64] Support SETCCCARRY lowering

Support SETCCCARRY lowering to the SBCS instruction.

Related issue: https://github.com/llvm/llvm-project/issues/44629

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D135302
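
For illustration (editorial note, not part of the commit message): an
unsigned 128-bit compare of the following shape is the kind of code this
lowering improves. With this change the AArch64 backend emits a
cmp/sbcs/cset sequence instead of two cmp/cset pairs plus a csel, as the
updated i128-cmp.ll checks below show.

    /* Illustrative only: a wider-than-register unsigned compare whose
       expansion produces a SETCCCARRY node. */
    int cmp_u128_ult(unsigned __int128 a, unsigned __int128 b) {
      return a < b;
    }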

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/fpclamptosat.ll
    llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
    llvm/test/CodeGen/AArch64/i128-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index e62f57c536b3..feb6c74807d9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1037,12 +1037,14 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
   case ISD::STRICT_FSETCC:
   case ISD::STRICT_FSETCCS:
   case ISD::SETCC:
+  case ISD::SETCCCARRY:
   case ISD::VP_SETCC:
   case ISD::BR_CC: {
     unsigned Opc = Node->getOpcode();
     unsigned CCOperand = Opc == ISD::SELECT_CC                         ? 4
                          : Opc == ISD::STRICT_FSETCC                   ? 3
                          : Opc == ISD::STRICT_FSETCCS                  ? 3
+                         : Opc == ISD::SETCCCARRY                      ? 3
                          : (Opc == ISD::SETCC || Opc == ISD::VP_SETCC) ? 2
                                                                        : 1;
     unsigned CompareOperand = Opc == ISD::BR_CC            ? 2

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7e0fb016c340..1940107724dc 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -414,6 +414,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
   setOperationAction(ISD::JumpTable, MVT::i64, Custom);
+  setOperationAction(ISD::SETCCCARRY, MVT::i64, Custom);
 
   setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
   setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
@@ -5545,6 +5546,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::STRICT_FSETCC:
   case ISD::STRICT_FSETCCS:
     return LowerSETCC(Op, DAG);
+  case ISD::SETCCCARRY:
+    return LowerSETCCCARRY(Op, DAG);
   case ISD::BRCOND:
     return LowerBRCOND(Op, DAG);
   case ISD::BR_CC:
@@ -8590,6 +8593,36 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
 }
 
+SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
+                                               SelectionDAG &DAG) const {
+
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  EVT VT = LHS.getValueType();
+  if (VT != MVT::i32 && VT != MVT::i64)
+    return SDValue();
+
+  SDLoc DL(Op);
+  SDValue Carry = Op.getOperand(2);
+  // SBCS uses a carry not a borrow so the carry flag should be inverted first.
+  SDValue InvCarry = valueToCarryFlag(Carry, DAG, true);
+  SDValue Cmp = DAG.getNode(AArch64ISD::SBCS, DL, DAG.getVTList(VT, MVT::Glue),
+                            LHS, RHS, InvCarry);
+
+  EVT OpVT = Op.getValueType();
+  SDValue TVal = DAG.getConstant(1, DL, OpVT);
+  SDValue FVal = DAG.getConstant(0, DL, OpVT);
+
+  ISD::CondCode Cond = cast<CondCodeSDNode>(Op.getOperand(3))->get();
+  ISD::CondCode CondInv = ISD::getSetCCInverse(Cond, VT);
+  SDValue CCVal =
+      DAG.getConstant(changeIntCCToAArch64CC(CondInv), DL, MVT::i32);
+  // Inputs are swapped because the condition is inverted. This will allow
+  // matching with a single CSINC instruction.
+  return DAG.getNode(AArch64ISD::CSEL, DL, OpVT, FVal, TVal, CCVal,
+                     Cmp.getValue(1));
+}
+
 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
                                               SDValue RHS, SDValue TVal,
                                               SDValue FVal, const SDLoc &dl,

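A plain-C model of the SBCS-based sequence that the new LowerSETCCCARRY
produces for an unsigned i128 less-than may help when reading the test
updates below (for cmp_i128_ult the expected code is
"cmp x0, x2; sbcs xzr, x1, x3; cset w0, lo"). This is a simplified sketch,
not the backend code:

    #include <stdint.h>

    /* Model of: cmp  a_lo, b_lo      ; low halves, borrow -> C == 0
                 sbcs xzr, a_hi, b_hi ; high halves minus that borrow, flags only
                 cset w0, lo          ; true iff the 128-bit subtraction borrowed */
    int i128_ult_model(uint64_t a_lo, uint64_t a_hi,
                       uint64_t b_lo, uint64_t b_hi) {
      int borrow = a_lo < b_lo; /* CMP sets C to !borrow */
      /* SBCS computes a_hi - b_hi - borrow; the "lo" condition (C clear)
         holds exactly when the full 128-bit value of a is below b. */
      return a_hi < b_hi || (a_hi == b_hi && borrow);
    }
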
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 33b2bf141320..8be30cbe718c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -986,6 +986,7 @@ class AArch64TargetLowering : public TargetLowering {
                                  SelectionDAG &DAG) const;
   SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
index b00dad42d455..65106b92858d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -301,14 +301,10 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
 ; CHECK-NEXT:  .LBB8_1: // %atomicrmw.start
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
-; CHECK-NEXT:    cmp x9, x2
-; CHECK-NEXT:    cset w10, ls
-; CHECK-NEXT:    cmp x8, x3
-; CHECK-NEXT:    cset w11, le
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csel x10, x8, x3, ne
-; CHECK-NEXT:    csel x11, x9, x2, ne
+; CHECK-NEXT:    cmp x2, x9
+; CHECK-NEXT:    sbcs xzr, x3, x8
+; CHECK-NEXT:    csel x10, x8, x3, ge
+; CHECK-NEXT:    csel x11, x9, x2, ge
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB8_1
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
@@ -328,14 +324,10 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
 ; CHECK-NEXT:  .LBB9_1: // %atomicrmw.start
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
-; CHECK-NEXT:    cmp x9, x2
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x8, x3
-; CHECK-NEXT:    cset w11, gt
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csel x10, x8, x3, ne
-; CHECK-NEXT:    csel x11, x9, x2, ne
+; CHECK-NEXT:    cmp x2, x9
+; CHECK-NEXT:    sbcs xzr, x3, x8
+; CHECK-NEXT:    csel x10, x8, x3, lt
+; CHECK-NEXT:    csel x11, x9, x2, lt
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB9_1
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
@@ -355,14 +347,10 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
 ; CHECK-NEXT:  .LBB10_1: // %atomicrmw.start
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
-; CHECK-NEXT:    cmp x9, x2
-; CHECK-NEXT:    cset w10, ls
-; CHECK-NEXT:    cmp x8, x3
-; CHECK-NEXT:    cset w11, ls
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csel x10, x8, x3, ne
-; CHECK-NEXT:    csel x11, x9, x2, ne
+; CHECK-NEXT:    cmp x2, x9
+; CHECK-NEXT:    sbcs xzr, x3, x8
+; CHECK-NEXT:    csel x10, x8, x3, hs
+; CHECK-NEXT:    csel x11, x9, x2, hs
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB10_1
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
@@ -382,14 +370,10 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
 ; CHECK-NEXT:  .LBB11_1: // %atomicrmw.start
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldaxp x9, x8, [x0]
-; CHECK-NEXT:    cmp x9, x2
-; CHECK-NEXT:    cset w10, hi
-; CHECK-NEXT:    cmp x8, x3
-; CHECK-NEXT:    cset w11, hi
-; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    csel x10, x8, x3, ne
-; CHECK-NEXT:    csel x11, x9, x2, ne
+; CHECK-NEXT:    cmp x2, x9
+; CHECK-NEXT:    sbcs xzr, x3, x8
+; CHECK-NEXT:    csel x10, x8, x3, lo
+; CHECK-NEXT:    csel x11, x9, x2, lo
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
 ; CHECK-NEXT:    cbnz w12, .LBB11_1
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end

diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat.ll b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
index 829b4ddbcc5c..d8c589022afa 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
@@ -398,13 +398,9 @@ define i64 @ustest_f64i64(double %x) {
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    csel x8, x0, xzr, lt
 ; CHECK-NEXT:    csinc x9, x1, xzr, lt
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w10, ne
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w9, w10, w9, eq
-; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    csel x0, x8, xzr, ne
+; CHECK-NEXT:    cmp xzr, x8
+; CHECK-NEXT:    ngcs xzr, x9
+; CHECK-NEXT:    csel x0, x8, xzr, lt
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -461,13 +457,9 @@ define i64 @ustest_f32i64(float %x) {
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    csel x8, x0, xzr, lt
 ; CHECK-NEXT:    csinc x9, x1, xzr, lt
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w10, ne
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w9, w10, w9, eq
-; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    csel x0, x8, xzr, ne
+; CHECK-NEXT:    cmp xzr, x8
+; CHECK-NEXT:    ngcs xzr, x9
+; CHECK-NEXT:    csel x0, x8, xzr, lt
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -530,13 +522,9 @@ define i64 @ustest_f16i64(half %x) {
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    csel x8, x0, xzr, lt
 ; CHECK-NEXT:    csinc x9, x1, xzr, lt
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w10, ne
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w9, w10, w9, eq
-; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    csel x0, x8, xzr, ne
+; CHECK-NEXT:    cmp xzr, x8
+; CHECK-NEXT:    ngcs xzr, x9
+; CHECK-NEXT:    csel x0, x8, xzr, lt
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index fab520135fc9..9d16f0be5660 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -397,12 +397,12 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NEXT:    .cfi_offset w20, -16
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov d0, v0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    bl __fixdfti
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x20, x1
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    mov d0, v0.d[1]
 ; CHECK-NEXT:    bl __fixdfti
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
@@ -411,23 +411,15 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NEXT:    cmp x20, #1
 ; CHECK-NEXT:    csel x10, x19, xzr, lt
 ; CHECK-NEXT:    csinc x11, x20, xzr, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x11, #0
-; CHECK-NEXT:    cset w11, gt
-; CHECK-NEXT:    csel w11, w12, w11, eq
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w9, w12, w9, eq
-; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    csel x8, x8, xzr, ne
-; CHECK-NEXT:    cmp w11, #0
-; CHECK-NEXT:    csel x9, x10, xzr, ne
+; CHECK-NEXT:    cmp xzr, x10
+; CHECK-NEXT:    ngcs xzr, x11
+; CHECK-NEXT:    csel x10, x10, xzr, lt
+; CHECK-NEXT:    cmp xzr, x8
+; CHECK-NEXT:    ngcs xzr, x9
+; CHECK-NEXT:    csel x8, x8, xzr, lt
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
@@ -511,12 +503,12 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[1]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
 ; CHECK-NEXT:    bl __fixsfti
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x20, x1
-; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    bl __fixsfti
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
@@ -525,23 +517,15 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NEXT:    cmp x20, #1
 ; CHECK-NEXT:    csel x10, x19, xzr, lt
 ; CHECK-NEXT:    csinc x11, x20, xzr, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x11, #0
-; CHECK-NEXT:    cset w11, gt
-; CHECK-NEXT:    csel w11, w12, w11, eq
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w8, gt
-; CHECK-NEXT:    csel w8, w12, w8, eq
-; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel x8, x9, xzr, ne
-; CHECK-NEXT:    cmp w11, #0
-; CHECK-NEXT:    csel x9, x10, xzr, ne
+; CHECK-NEXT:    cmp xzr, x10
+; CHECK-NEXT:    ngcs xzr, x11
+; CHECK-NEXT:    csel x10, x10, xzr, lt
+; CHECK-NEXT:    cmp xzr, x9
+; CHECK-NEXT:    ngcs xzr, x8
+; CHECK-NEXT:    csel x8, x9, xzr, lt
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
@@ -637,12 +621,12 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov h0, v0.h[1]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $q0
 ; CHECK-NEXT:    bl __fixhfti
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x20, x1
-; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $q0
+; CHECK-NEXT:    mov h0, v0.h[1]
 ; CHECK-NEXT:    bl __fixhfti
 ; CHECK-NEXT:    cmp x1, #1
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
@@ -651,23 +635,15 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NEXT:    cmp x20, #1
 ; CHECK-NEXT:    csel x10, x19, xzr, lt
 ; CHECK-NEXT:    csinc x11, x20, xzr, lt
-; CHECK-NEXT:    cmp x10, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x11, #0
-; CHECK-NEXT:    cset w11, gt
-; CHECK-NEXT:    csel w11, w12, w11, eq
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cset w12, ne
-; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    cset w8, gt
-; CHECK-NEXT:    csel w8, w12, w8, eq
-; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel x8, x9, xzr, ne
-; CHECK-NEXT:    cmp w11, #0
-; CHECK-NEXT:    csel x9, x10, xzr, ne
+; CHECK-NEXT:    cmp xzr, x10
+; CHECK-NEXT:    ngcs xzr, x11
+; CHECK-NEXT:    csel x10, x10, xzr, lt
+; CHECK-NEXT:    cmp xzr, x9
+; CHECK-NEXT:    ngcs xzr, x8
+; CHECK-NEXT:    csel x8, x9, xzr, lt
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/i128-cmp.ll b/llvm/test/CodeGen/AArch64/i128-cmp.ll
index 6f588b0f4053..7cc3e843ba24 100644
--- a/llvm/test/CodeGen/AArch64/i128-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/i128-cmp.ll
@@ -32,11 +32,9 @@ define i1 @cmp_i128_ne(i128 %a, i128 %b) {
 define i1 @cmp_i128_ugt(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_ugt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, hi
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
     %cmp = icmp ugt i128 %a, %b
     ret i1 %cmp
@@ -46,10 +44,8 @@ define i1 @cmp_i128_uge(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_uge:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    ret
     %cmp = icmp uge i128 %a, %b
     ret i1 %cmp
@@ -59,10 +55,8 @@ define i1 @cmp_i128_ult(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_ult:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, lo
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
     %cmp = icmp ult i128 %a, %b
     ret i1 %cmp
@@ -71,11 +65,9 @@ define i1 @cmp_i128_ult(i128 %a, i128 %b) {
 define i1 @cmp_i128_ule(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_ule:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, ls
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, ls
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    ret
     %cmp = icmp ule i128 %a, %b
     ret i1 %cmp
@@ -84,11 +76,9 @@ define i1 @cmp_i128_ule(i128 %a, i128 %b) {
 define i1 @cmp_i128_sgt(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_sgt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w0, lt
 ; CHECK-NEXT:    ret
     %cmp = icmp sgt i128 %a, %b
     ret i1 %cmp
@@ -98,10 +88,8 @@ define i1 @cmp_i128_sge(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_sge:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, ge
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    cset w0, ge
 ; CHECK-NEXT:    ret
     %cmp = icmp sge i128 %a, %b
     ret i1 %cmp
@@ -111,10 +99,8 @@ define i1 @cmp_i128_slt(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_slt:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    cset w0, lt
 ; CHECK-NEXT:    ret
     %cmp = icmp slt i128 %a, %b
     ret i1 %cmp
@@ -123,11 +109,9 @@ define i1 @cmp_i128_slt(i128 %a, i128 %b) {
 define i1 @cmp_i128_sle(i128 %a, i128 %b) {
 ; CHECK-LABEL: cmp_i128_sle:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, ls
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, le
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w0, ge
 ; CHECK-NEXT:    ret
     %cmp = icmp sle i128 %a, %b
     ret i1 %cmp
@@ -180,12 +164,9 @@ exit:
 define void @br_on_cmp_i128_ugt(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_ugt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, ls
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, ls
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB12_2
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    b.hs .LBB12_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -205,11 +186,8 @@ define void @br_on_cmp_i128_uge(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_uge:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, lo
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB13_2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    b.lo .LBB13_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -229,11 +207,8 @@ define void @br_on_cmp_i128_ult(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_ult:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, hs
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB14_2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    b.hs .LBB14_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -252,12 +227,9 @@ exit:
 define void @br_on_cmp_i128_ule(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_ule:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, hi
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB15_2
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    b.lo .LBB15_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -276,12 +248,9 @@ exit:
 define void @br_on_cmp_i128_sgt(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_sgt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, ls
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, le
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB16_2
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    b.ge .LBB16_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -301,11 +270,8 @@ define void @br_on_cmp_i128_sge(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_sge:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, lt
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB17_2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    b.lt .LBB17_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -325,11 +291,8 @@ define void @br_on_cmp_i128_slt(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_slt:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hs
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, ge
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB18_2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    b.ge .LBB18_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
@@ -348,12 +311,9 @@ exit:
 define void @br_on_cmp_i128_sle(i128 %a, i128 %b) nounwind {
 ; CHECK-LABEL: br_on_cmp_i128_sle:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, x2
-; CHECK-NEXT:    cset w8, hi
-; CHECK-NEXT:    cmp x1, x3
-; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    csel w8, w8, w9, eq
-; CHECK-NEXT:    tbnz w8, #0, .LBB19_2
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    b.lt .LBB19_2
 ; CHECK-NEXT:  // %bb.1: // %call
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl call
