[llvm-branch-commits] [llvm] 777a4a5 - [AArch64] Extend `performANDORCSELCombine` to cover `fcmp` and `cmn`
Karl Meakin via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Sep 23 19:37:08 PDT 2022
Author: Karl Meakin
Date: 2022-09-06T02:28:17+01:00
New Revision: 777a4a5e4e216f8f316c6cd9d3070cdeba2a9ca0
URL: https://github.com/llvm/llvm-project/commit/777a4a5e4e216f8f316c6cd9d3070cdeba2a9ca0
DIFF: https://github.com/llvm/llvm-project/commit/777a4a5e4e216f8f316c6cd9d3070cdeba2a9ca0.diff
LOG: [AArch64] Extend `performANDORCSELCombine` to cover `fcmp` and `cmn`
Added:
llvm/test/CodeGen/AArch64/ccmn.ll
llvm/test/CodeGen/AArch64/fccmp.ll
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Removed:
################################################################################
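At the source level, the extended combine targets compare chains like the
following minimal sketch (the function names are invented for illustration;
the instruction sequences quoted in the comments are the ones checked by the
new ccmn.ll and fccmp.ll tests below):

  // Illustrative only: small negative immediates now chain through ccmn.
  bool cmn_chain(int a, int b) {
    // checked below: cmn w1, #2 ; ccmn w0, #1, #0, eq ; cset w0, eq
    return a == -1 && b == -2;
  }

  // Illustrative only: floating-point compares now chain through fccmp.
  bool fccmp_chain(float a, float b, float c, float d) {
    // checked below: fcmp s0, s1 ; fccmp s2, s3, #4, mi ; cset w0, gt
    return a < b && c > d;
  }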
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 67a5f88e1408..cec26c133e7d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14993,6 +14993,22 @@ static SDValue tryCombineToBSL(SDNode *N,
// (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
// =>
// (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
+
+// (AND (CSET cc0 cmp0) (CSET cc1 (CMN x1 y1)))
+// =>
+// (CSET cc1 (CCMN x1 y1 !cc1 cc0 cmp0))
+//
+// (OR (CSET cc0 cmp0) (CSET cc1 (CMN x1 y1)))
+// =>
+// (CSET cc1 (CCMN x1 y1 cc1 !cc0 cmp0))
+
+// (AND (CSET cc0 cmp0) (CSET cc1 (FCMP x1 y1)))
+// =>
+// (CSET cc1 (FCCMP x1 y1 !cc1 cc0 cmp0))
+//
+// (OR (CSET cc0 cmp0) (CSET cc1 (FCMP x1 y1)))
+// =>
+// (CSET cc1 (FCCMP x1 y1 cc1 !cc0 cmp0))
static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue CSel0 = N->getOperand(0);
@@ -15017,15 +15033,36 @@ static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
return SDValue();
- if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
- Cmp0.getOpcode() == AArch64ISD::SUBS) {
- std::swap(Cmp0, Cmp1);
- std::swap(CC0, CC1);
+
+ unsigned Opcode = 0;
+ bool Swap = false;
+
+ if (Cmp0.getOpcode() != AArch64ISD::SUBS &&
+ Cmp1.getOpcode() == AArch64ISD::SUBS) {
+ Opcode = AArch64ISD::CCMP;
+ } else if (Cmp0.getOpcode() == AArch64ISD::SUBS) {
+ Opcode = AArch64ISD::CCMP;
+ Swap = true;
+ } else if (Cmp0.getOpcode() == AArch64ISD::ADDS) {
+ Opcode = AArch64ISD::CCMN;
+ } else if (Cmp1.getOpcode() == AArch64ISD::ADDS) {
+ Opcode = AArch64ISD::CCMN;
+ Swap = true;
+ } else if (Cmp0.getOpcode() == AArch64ISD::FCMP) {
+ Opcode = AArch64ISD::FCCMP;
+ } else if (Cmp1.getOpcode() == AArch64ISD::FCMP) {
+ Opcode = AArch64ISD::FCCMP;
+ Swap = true;
}
- if (Cmp1.getOpcode() != AArch64ISD::SUBS)
+ if (Opcode == 0)
return SDValue();
+ if (Swap) {
+ std::swap(Cmp0, Cmp1);
+ std::swap(CC0, CC1);
+ }
+
SDLoc DL(N);
SDValue CCmp;
@@ -15034,7 +15071,7 @@ static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
- CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
+ CCmp = DAG.getNode(Opcode, DL, MVT_CC, Cmp1.getOperand(0),
Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
} else {
SDLoc DL(N);
@@ -15042,7 +15079,7 @@ static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
- CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
+ CCmp = DAG.getNode(Opcode, DL, MVT_CC, Cmp1.getOperand(0),
Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
}
return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
@@ -19790,6 +19827,26 @@ static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128);
}
+// (CCMP x c nzcv cc cond) => (CCMN x -c nzcv cc cond) if -31 <= c < 0
+static SDValue performCCMPCombine(SDNode *N, SelectionDAG &DAG) {
+ auto X = N->getOperand(0);
+ auto C = N->getOperand(1);
+ auto NZCV = N->getOperand(2);
+ auto CC = N->getOperand(3);
+ auto Cond = N->getOperand(4);
+ SDLoc DL(N);
+ auto VTs = N->getVTList();
+
+ if (ConstantSDNode *ConstC = dyn_cast<ConstantSDNode>(C)) {
+ auto ConstInt = ConstC->getSExtValue();
+ if (-31 <= ConstInt && ConstInt < 0) {
+ auto NegC = DAG.getConstant(-ConstInt, DL, MVT::i32);
+ return DAG.getNode(AArch64ISD::CCMN, DL, VTs, X, NegC, NZCV, CC, Cond);
+ }
+ }
+ return SDValue();
+}
+
SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -19797,6 +19854,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
default:
LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
break;
+ case AArch64ISD::CCMP:
+ return performCCMPCombine(N, DAG);
case ISD::ADD:
case ISD::SUB:
return performAddSubCombine(N, DCI, DAG);
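The new performCCMPCombine above rewrites a conditional compare against a
small negative immediate into a conditional compare-negative against its
negation. A minimal sketch of the legality check, assuming only that CCMN
encodes an unsigned 5-bit immediate (the helper name is invented and is not
an LLVM API):

  #include <cstdint>

  // For any non-zero c, "x - c" and "x + (-c)" set identical NZCV flags, so
  // CCMP x, c can become CCMN x, -c whenever -c fits CCMN's immediate field
  // (0..31), i.e. exactly when -31 <= c <= -1.
  bool ccmpImmediateFitsCcmn(int64_t C) {
    return C >= -31 && C < 0;
  }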
diff --git a/llvm/test/CodeGen/AArch64/ccmn.ll b/llvm/test/CodeGen/AArch64/ccmn.ll
new file mode 100644
index 000000000000..7532acac6bf3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ccmn.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,SDISEL
+
+; w0 == -1 && w1 == -2
+define i1 @w0_eq_minus_1_and_w1_eq_minus_2(i32 %0, i32 %1) {
+; CHECK-LABEL: w0_eq_minus_1_and_w1_eq_minus_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w1, #2
+; CHECK-NEXT: ccmn w0, #1, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %3 = icmp eq i32 %0, -1
+ %4 = icmp eq i32 %1, -2
+ %5 = and i1 %3, %4
+ ret i1 %5
+}
+
+; w0 == -30 && w1 == -31
+define i1 @w0_eq_minus_30_and_w1_eq_minus_31(i32 %0, i32 %1) {
+; CHECK-LABEL: w0_eq_minus_30_and_w1_eq_minus_31:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w1, #31
+; CHECK-NEXT: ccmn w0, #30, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %3 = icmp eq i32 %0, -30
+ %4 = icmp eq i32 %1, -31
+ %5 = and i1 %3, %4
+ ret i1 %5
+}
+
+; x0 == -1 && x1 == -2
+define i1 @x0_eq_minus_1_and_x1_eq_minus_2(i64 %0, i64 %1) {
+; CHECK-LABEL: x0_eq_minus_1_and_x1_eq_minus_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x1, #2
+; CHECK-NEXT: ccmn x0, #1, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %3 = icmp eq i64 %0, -1
+ %4 = icmp eq i64 %1, -2
+ %5 = and i1 %3, %4
+ ret i1 %5
+}
+
+; x0 == -30 && x1 == -31
+define i1 @x0_eq_minus_30_and_x1_eq_minus_31(i64 %0, i64 %1) {
+; CHECK-LABEL: x0_eq_minus_30_and_x1_eq_minus_31:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x1, #31
+; CHECK-NEXT: ccmn x0, #30, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %3 = icmp eq i64 %0, -30
+ %4 = icmp eq i64 %1, -31
+ %5 = and i1 %3, %4
+ ret i1 %5
+}
+
+; w0 == -w1 && w2 == -w3
+define i1 @w0_eq_minus_w1_and_w2_eq_minus_w3(i32 %0, i32 %1, i32 %2, i32 %3) {
+; CHECK-LABEL: w0_eq_minus_w1_and_w2_eq_minus_w3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w1, w0
+; CHECK-NEXT: ccmn w3, w2, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %5 = sub i32 0, %1
+ %6 = icmp eq i32 %5, %0
+ %7 = sub i32 0, %3
+ %8 = icmp eq i32 %7, %2
+ %9 = and i1 %6, %8
+ ret i1 %9
+}
+
+; x0 == -x1 && x2 == -x3
+define i1 @x0_eq_minus_x1_and_x2_eq_minus_x3(i64 %0, i64 %1, i64 %2, i64 %3) {
+; CHECK-LABEL: x0_eq_minus_x1_and_x2_eq_minus_x3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x1, x0
+; CHECK-NEXT: ccmn x3, x2, #0, eq
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %5 = sub i64 0, %1
+ %6 = icmp eq i64 %5, %0
+ %7 = sub i64 0, %3
+ %8 = icmp eq i64 %7, %2
+ %9 = and i1 %6, %8
+ ret i1 %9
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SDISEL: {{.*}}
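The last two tests above exercise the register-register form: comparing one
value against the negation of another. A minimal source-level sketch of that
shape (the function name is invented):

  // Illustrative only: the equivalent IR is checked above to produce
  //   cmn w1, w0 ; ccmn w3, w2, #0, eq ; cset w0, eq
  bool eq_negated_pair(int a, int b, int c, int d) {
    return a == -b && c == -d;
  }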
diff --git a/llvm/test/CodeGen/AArch64/fccmp.ll b/llvm/test/CodeGen/AArch64/fccmp.ll
new file mode 100644
index 000000000000..6d0f11918f40
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fccmp.ll
@@ -0,0 +1,281 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,SDISEL
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,GISEL
+
+; Ensure chains of comparisons produce chains of `fccmp`
+
+; (x0 < x1) && (x2 > x3)
+define i1 @fcmp_and2(float %0, float %1, float %2, float %3) {
+; SDISEL-LABEL: fcmp_and2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s0, s1
+; SDISEL-NEXT: fccmp s2, s3, #4, mi
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: fcmp_and2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+ %5 = fcmp olt float %0, %1
+ %6 = fcmp ogt float %2, %3
+ %7 = and i1 %5, %6
+ ret i1 %7
+}
+
+; (x0 < x1) && (x2 > x3) && (x4 != x5)
+define i1 @fcmp_and3(float %0, float %1, float %2, float %3, float %4, float %5) {
+; SDISEL-LABEL: fcmp_and3:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s4, s5
+; SDISEL-NEXT: fccmp s2, s3, #4, ne
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: fcmp_and3:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: fcmp s4, s5
+; GISEL-NEXT: and w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+ %7 = fcmp olt float %0, %1
+ %8 = fcmp ogt float %2, %3
+ %9 = and i1 %7, %8
+ %10 = fcmp une float %4, %5
+ %11 = and i1 %9, %10
+ ret i1 %11
+}
+
+; (x0 < x1) && (x2 > x3) && (x4 != x5) && (x6 == x7)
+define i1 @cmp_and4(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7) {
+; SDISEL-LABEL: cmp_and4:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s6, s7
+; SDISEL-NEXT: fccmp s2, s3, #4, eq
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: cmp_and4:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: fcmp s4, s5
+; GISEL-NEXT: and w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: fcmp s6, s7
+; GISEL-NEXT: and w8, w8, w9
+; GISEL-NEXT: cset w9, eq
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+ %9 = fcmp olt float %0, %1
+ %10 = fcmp ogt float %2, %3
+ %11 = and i1 %9, %10
+ %12 = fcmp une float %4, %5
+ %13 = and i1 %11, %12
+ %14 = fcmp oeq float %6, %7
+ %15 = and i1 %13, %14
+ ret i1 %15
+}
+
+; (x0 < x1) || (x2 > x3)
+define i1 @cmp_or2(float %0, float %1, float %2, float %3) {
+; SDISEL-LABEL: cmp_or2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s0, s1
+; SDISEL-NEXT: fccmp s2, s3, #0, pl
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: cmp_or2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: orr w0, w8, w9
+; GISEL-NEXT: ret
+ %5 = fcmp olt float %0, %1
+ %6 = fcmp ogt float %2, %3
+ %7 = or i1 %5, %6
+ ret i1 %7
+}
+
+; (x0 < x1) || (x2 > x3) || (x4 != x5)
+define i1 @cmp_or3(float %0, float %1, float %2, float %3, float %4, float %5) {
+; SDISEL-LABEL: cmp_or3:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s4, s5
+; SDISEL-NEXT: fccmp s2, s3, #0, eq
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: cmp_or3:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: fcmp s4, s5
+; GISEL-NEXT: orr w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: orr w0, w8, w9
+; GISEL-NEXT: ret
+ %7 = fcmp olt float %0, %1
+ %8 = fcmp ogt float %2, %3
+ %9 = or i1 %7, %8
+ %10 = fcmp une float %4, %5
+ %11 = or i1 %9, %10
+ ret i1 %11
+}
+
+; (x0 < x1) || (x2 > x3) || (x4 != x5) || (x6 == x7)
+define i1 @cmp_or4(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7) {
+; SDISEL-LABEL: cmp_or4:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: fcmp s6, s7
+; SDISEL-NEXT: fccmp s2, s3, #0, ne
+; SDISEL-NEXT: cset w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: cmp_or4:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, s1
+; GISEL-NEXT: cset w8, mi
+; GISEL-NEXT: fcmp s2, s3
+; GISEL-NEXT: cset w9, gt
+; GISEL-NEXT: fcmp s4, s5
+; GISEL-NEXT: orr w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: fcmp s6, s7
+; GISEL-NEXT: orr w8, w8, w9
+; GISEL-NEXT: cset w9, eq
+; GISEL-NEXT: orr w0, w8, w9
+; GISEL-NEXT: ret
+ %9 = fcmp olt float %0, %1
+ %10 = fcmp ogt float %2, %3
+ %11 = or i1 %9, %10
+ %12 = fcmp une float %4, %5
+ %13 = or i1 %11, %12
+ %14 = fcmp oeq float %6, %7
+ %15 = or i1 %13, %14
+ ret i1 %15
+}
+
+; (x0 != 0) || (x1 != 0)
+define i1 @true_or2(float %0, float %1) {
+; SDISEL-LABEL: true_or2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: movi d2, #0000000000000000
+; SDISEL-NEXT: fcmp s0, #0.0
+; SDISEL-NEXT: fccmp s1, s2, #0, eq
+; SDISEL-NEXT: cset w0, ne
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: true_or2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, #0.0
+; GISEL-NEXT: cset w8, ne
+; GISEL-NEXT: fcmp s1, #0.0
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: orr w0, w8, w9
+; GISEL-NEXT: ret
+ %3 = fcmp une float %0, 0.000000e+00
+ %4 = fcmp une float %1, 0.000000e+00
+ %5 = or i1 %3, %4
+ ret i1 %5
+}
+
+; (x0 != 0) || (x1 != 0) || (x2 != 0)
+define i1 @true_or3(float %0, float %1, float %2) {
+; SDISEL-LABEL: true_or3:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: movi d0, #0000000000000000
+; SDISEL-NEXT: fcmp s2, #0.0
+; SDISEL-NEXT: fccmp s1, s0, #0, eq
+; SDISEL-NEXT: cset w0, ne
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: true_or3:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, #0.0
+; GISEL-NEXT: cset w8, ne
+; GISEL-NEXT: fcmp s1, #0.0
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: fcmp s2, #0.0
+; GISEL-NEXT: orr w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: orr w0, w8, w9
+; GISEL-NEXT: ret
+ %4 = fcmp une float %0, 0.000000e+00
+ %5 = fcmp une float %1, 0.000000e+00
+ %6 = or i1 %4, %5
+ %7 = fcmp une float %2, 0.000000e+00
+ %8 = or i1 %6, %7
+ ret i1 %8
+}
+
+; (x0 != 0) && (x1 != 0)
+define i1 @true_and2(float %0, float %1) {
+; SDISEL-LABEL: true_and2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: movi d2, #0000000000000000
+; SDISEL-NEXT: fcmp s0, #0.0
+; SDISEL-NEXT: fccmp s1, s2, #4, ne
+; SDISEL-NEXT: cset w0, ne
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: true_and2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, #0.0
+; GISEL-NEXT: cset w8, ne
+; GISEL-NEXT: fcmp s1, #0.0
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+ %3 = fcmp une float %0, 0.000000e+00
+ %4 = fcmp une float %1, 0.000000e+00
+ %5 = and i1 %3, %4
+ ret i1 %5
+}
+
+; (x0 != 0) && (x1 != 0) && (x2 != 0)
+define i1 @true_and3(float %0, float %1, float %2) {
+; SDISEL-LABEL: true_and3:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: movi d0, #0000000000000000
+; SDISEL-NEXT: fcmp s2, #0.0
+; SDISEL-NEXT: fccmp s1, s0, #4, ne
+; SDISEL-NEXT: cset w0, ne
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: true_and3:
+; GISEL: // %bb.0:
+; GISEL-NEXT: fcmp s0, #0.0
+; GISEL-NEXT: cset w8, ne
+; GISEL-NEXT: fcmp s1, #0.0
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: fcmp s2, #0.0
+; GISEL-NEXT: and w8, w8, w9
+; GISEL-NEXT: cset w9, ne
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+ %4 = fcmp une float %0, 0.000000e+00
+ %5 = fcmp une float %1, 0.000000e+00
+ %6 = and i1 %4, %5
+ %7 = fcmp une float %2, 0.000000e+00
+ %8 = and i1 %6, %7
+ ret i1 %8
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
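The combine is implemented in SelectionDAG lowering, so the GISEL check lines
above still show separate fcmp/cset/and/orr sequences. Note also that fcmp has
an immediate #0.0 form but fccmp takes only register operands, which is why
the compare-with-zero tests materialize a zero register (the movi) for the
chained compare. A minimal source-level sketch of that shape (the function
name is invented):

  // Illustrative only: the equivalent IR is checked above to produce
  //   movi d2, #0 ; fcmp s0, #0.0 ; fccmp s1, s2, #0, eq ; cset w0, ne
  bool any_nonzero(float a, float b) {
    return a != 0.0f || b != 0.0f;
  }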