[llvm] DAGCombiner: Support fmaximum/fminimum and fmaximumnum/fminimumnum (PR #137318)
YunQiang Su via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 25 04:42:21 PDT 2025
https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/137318
>From 792d6721e6542680914ddd4ccc990b9537f7b1f6 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Fri, 18 Apr 2025 11:12:58 +0800
Subject: [PATCH 1/2] DAGCombiner: Support fmaximum/fminimum and
fmaximumnum/fminimumnum
Some architectures, such as RISC-V, support the new fmax/fmin instructions
introduced by IEEE 754-2019.
We can use them in `getMinMaxOpcodeForFP`.
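For example, with this change a pair of compares against a common operand
joined by a logic op can be folded into a single compare of a min/max.
Conceptually (a minimal sketch in IR terms; the combine itself runs on
SelectionDAG nodes, and the opcode actually chosen depends on the predicate,
the logic op, and which min/max nodes are legal for the target):
  ; before: two ordered compares joined by an 'or'
  %cmp  = fcmp olt float %a, %c
  %cmp1 = fcmp olt float %b, %c
  %r    = or i1 %cmp, %cmp1
  ; after, when FMINIMUMNUM is legal (e.g. RISC-V with +f):
  %min = call float @llvm.minimumnum.f32(float %a, float %b)
  %r   = fcmp olt float %min, %c
This is sound for the 'or' case because minimumnum returns the other operand
when one input is NaN, matching how 'fcmp olt' treats a NaN operand as false.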
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 133 ++--
llvm/test/CodeGen/AArch64/fmaxmin-combine.ll | 340 ++++++++
llvm/test/CodeGen/RISCV/fmaxmin-combine.ll | 742 ++++++++++++++++++
3 files changed, 1157 insertions(+), 58 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
create mode 100644 llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 282dc4470238d..79f626bf88c1e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6333,60 +6333,70 @@ static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
return DAG.isKnownNeverNaN(Operand2) && DAG.isKnownNeverNaN(Operand1);
}
-// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
-static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
- ISD::CondCode CC, unsigned OrAndOpcode,
- SelectionDAG &DAG,
- bool isFMAXNUMFMINNUM_IEEE,
- bool isFMAXNUMFMINNUM) {
- // The optimization cannot be applied for all the predicates because
- // of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle
- // NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be
- // applied at all if one of the operands is a signaling NaN.
-
- // It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
- // are non NaN values.
- if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
- ((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND)))
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
- isFMAXNUMFMINNUM_IEEE
- ? ISD::FMINNUM_IEEE
- : ISD::DELETED_NODE;
- else if (((CC == ISD::SETGT || CC == ISD::SETGE) &&
- (OrAndOpcode == ISD::OR)) ||
- ((CC == ISD::SETLT || CC == ISD::SETLE) &&
- (OrAndOpcode == ISD::AND)))
- return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
- isFMAXNUMFMINNUM_IEEE
- ? ISD::FMAXNUM_IEEE
- : ISD::DELETED_NODE;
- // Both FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle quiet
- // NaNs in the same way. But, FMINNUM/FMAXNUM and FMINNUM_IEEE/
- // FMAXNUM_IEEE handle signaling NaNs differently. If we cannot prove
- // that there are not any sNaNs, then the optimization is not valid
- // for FMINNUM_IEEE/FMAXNUM_IEEE. In the presence of sNaNs, we apply
- // the optimization using FMINNUM/FMAXNUM for the following cases. If
- // we can prove that we do not have any sNaNs, then we can do the
- // optimization using FMINNUM_IEEE/FMAXNUM_IEEE for the following
- // cases.
- else if (((CC == ISD::SETOLT || CC == ISD::SETOLE) &&
- (OrAndOpcode == ISD::OR)) ||
- ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
- (OrAndOpcode == ISD::AND)))
- return isFMAXNUMFMINNUM ? ISD::FMINNUM
- : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
- isFMAXNUMFMINNUM_IEEE
- ? ISD::FMINNUM_IEEE
- : ISD::DELETED_NODE;
- else if (((CC == ISD::SETOGT || CC == ISD::SETOGE) &&
- (OrAndOpcode == ISD::OR)) ||
- ((CC == ISD::SETULT || CC == ISD::SETULE) &&
- (OrAndOpcode == ISD::AND)))
- return isFMAXNUMFMINNUM ? ISD::FMAXNUM
- : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
- isFMAXNUMFMINNUM_IEEE
- ? ISD::FMAXNUM_IEEE
- : ISD::DELETED_NODE;
+static unsigned
+getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
+ unsigned OrAndOpcode, SelectionDAG &DAG,
+ bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM,
+ bool isFMAXIMUMFMINIMUM, bool isFMAXIMUMNUMFMINIMUMNUM) {
+ bool isMax = true;
+  // The result of SETLT/SETLE/SETGT/SETGE is undefined if either operand
+  // is NaN, so we may treat them as SETOLT/SETOLE/SETOGT/SETOGE.
+ if (((CC == ISD::SETLT || CC == ISD::SETLE || CC == ISD::SETOLT ||
+ CC == ISD::SETOLE) &&
+ (OrAndOpcode == ISD::OR)) ||
+ ((CC == ISD::SETUGT || CC == ISD::SETUGE) && (OrAndOpcode == ISD::AND))) {
+ isMax = false;
+ if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+ isFMAXNUMFMINNUM_IEEE)
+ return ISD::FMINNUM_IEEE;
+ if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+ return ISD::FMINNUM;
+ if (isFMAXIMUMNUMFMINIMUMNUM)
+ return ISD::FMINIMUMNUM;
+ } else if (((CC == ISD::SETLT || CC == ISD::SETLE || CC == ISD::SETOLT ||
+ CC == ISD::SETOLE) &&
+ (OrAndOpcode == ISD::AND)) ||
+ ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
+ (OrAndOpcode == ISD::OR))) {
+ isMax = true;
+ if (isFMAXIMUMFMINIMUM)
+ return ISD::FMAXIMUM;
+ } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
+ CC == ISD::SETOGE) &&
+ (OrAndOpcode == ISD::OR)) ||
+ ((CC == ISD::SETULT || CC == ISD::SETULE) &&
+ (OrAndOpcode == ISD::AND))) {
+ isMax = true;
+ if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+ isFMAXNUMFMINNUM_IEEE)
+ return ISD::FMAXNUM_IEEE;
+ if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+ return ISD::FMAXNUM;
+ if (isFMAXIMUMNUMFMINIMUMNUM)
+ return ISD::FMAXIMUMNUM;
+ } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
+ CC == ISD::SETOGE) &&
+ (OrAndOpcode == ISD::AND)) ||
+ ((CC == ISD::SETULT || CC == ISD::SETULE) &&
+ (OrAndOpcode == ISD::OR))) {
+ isMax = false;
+ if (isFMAXIMUMFMINIMUM)
+ return ISD::FMINIMUM;
+ }
+ if (arebothOperandsNotNan(Operand1, Operand2, DAG)) {
+    // Keep this order to simplify the unit tests:
+    // AArch64 has FMAXNUM_IEEE, but not FMAXIMUMNUM.
+    // RISCV64 has FMAXIMUMNUM, but not FMAXNUM_IEEE.
+    // Both have FMAXIMUM (RISCV64 has a feature switch for it).
+ if (isFMAXIMUMFMINIMUM)
+ return isMax ? ISD::FMAXIMUM : ISD::FMINIMUM;
+ if (isFMAXNUMFMINNUM_IEEE)
+ return isMax ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
+ if (isFMAXIMUMNUMFMINIMUMNUM)
+ return isMax ? ISD::FMAXIMUMNUM : ISD::FMINIMUMNUM;
+ if (isFMAXNUMFMINNUM)
+ return isMax ? ISD::FMAXNUM : ISD::FMINNUM;
+ }
return ISD::DELETED_NODE;
}
@@ -6433,14 +6443,20 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
// predicate of one of the comparisons is the opposite of the other one.
bool isFMAXNUMFMINNUM_IEEE = TLI.isOperationLegal(ISD::FMAXNUM_IEEE, OpVT) &&
TLI.isOperationLegal(ISD::FMINNUM_IEEE, OpVT);
- bool isFMAXNUMFMINNUM = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, OpVT) &&
- TLI.isOperationLegalOrCustom(ISD::FMINNUM, OpVT);
+ bool isFMAXNUMFMINNUM = TLI.isOperationLegal(ISD::FMAXNUM, OpVT) &&
+ TLI.isOperationLegal(ISD::FMINNUM, OpVT);
+ bool isFMAXIMUMFMINIMUM = TLI.isOperationLegal(ISD::FMAXIMUM, OpVT) &&
+ TLI.isOperationLegal(ISD::FMINIMUM, OpVT);
+ bool isFMAXIMUMNUMFMINIMUMNUM =
+ TLI.isOperationLegal(ISD::FMAXIMUMNUM, OpVT) &&
+ TLI.isOperationLegal(ISD::FMINIMUMNUM, OpVT);
if (((OpVT.isInteger() && TLI.isOperationLegal(ISD::UMAX, OpVT) &&
TLI.isOperationLegal(ISD::SMAX, OpVT) &&
TLI.isOperationLegal(ISD::UMIN, OpVT) &&
TLI.isOperationLegal(ISD::SMIN, OpVT)) ||
(OpVT.isFloatingPoint() &&
- (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM))) &&
+ (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM || isFMAXIMUMFMINIMUM ||
+ isFMAXIMUMNUMFMINIMUMNUM))) &&
!ISD::isIntEqualitySetCC(CCL) && !ISD::isFPEqualitySetCC(CCL) &&
CCL != ISD::SETFALSE && CCL != ISD::SETO && CCL != ISD::SETUO &&
CCL != ISD::SETTRUE &&
@@ -6496,7 +6512,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
} else if (OpVT.isFloatingPoint())
NewOpcode =
getMinMaxOpcodeForFP(Operand1, Operand2, CC, LogicOp->getOpcode(),
- DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
+ DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM,
+ isFMAXIMUMFMINIMUM, isFMAXIMUMNUMFMINIMUMNUM);
if (NewOpcode != ISD::DELETED_NODE) {
SDValue MinMaxValue =
diff --git a/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll b/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
new file mode 100644
index 0000000000000..671f47a46dc8a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
@@ -0,0 +1,340 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
+
+define i1 @f_olt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w8, mi
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: csinc w0, w8, wzr, pl
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: fccmp s0, s2, #0, hi
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w8, gt
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: csinc w0, w8, wzr, le
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: fccmp s0, s2, #0, lt
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
diff --git a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
new file mode 100644
index 0000000000000..1da45ac0f2734
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
@@ -0,0 +1,742 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=riscv64 --mattr=+f,+zfa < %s | FileCheck %s --check-prefix=RISCV64
+; RUN: llc --mtriple=riscv64 --mattr=+f,-zfa < %s | FileCheck %s --check-prefix=RISCV64-NOZFA
+; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
+
+define i1 @f_olt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_olt_or:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w8, mi
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: csinc w0, w8, wzr, pl
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_olt_or_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_olt_or_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ugt_and:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: fccmp s0, s2, #0, hi
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ugt_and_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ugt_and_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_olt_and:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB6_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB6_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB6_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB6_4: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_olt_and_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_olt_and_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB8_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB8_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB8_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB8_4: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_olt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, mi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp olt float %a, %c
+ %cmp1 = fcmp olt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ugt_or:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB9_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB9_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB9_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB9_4: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ugt_or_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ugt_or_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB11_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB11_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB11_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB11_4: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ugt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmax s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, hi
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ugt float %a, %c
+ %cmp1 = fcmp ugt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ogt_or:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w8, gt
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: csinc w0, w8, wzr, le
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ogt_or_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ogt_or_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ult_and:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fcmp s1, s2
+; AARCH64-NEXT: fccmp s0, s2, #0, lt
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ult_and_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ult_and_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ogt_and:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB18_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB18_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB18_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB18_4: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_and:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ogt_and_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_and_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ogt_and_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB20_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB20_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB20_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB20_4: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ogt_and_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, gt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ogt float %a, %c
+ %cmp1 = fcmp ogt float %b, %c
+ %0 = and i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ult_or:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB21_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB21_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB21_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB21_4: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_or:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ult_or_nan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or_nan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_or_nan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
+
+define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ult_or_snan:
+; RISCV64: # %bb.0: # %entry
+; RISCV64-NEXT: fminm.s fa5, fa0, fa1
+; RISCV64-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NEXT: xori a0, a0, 1
+; RISCV64-NEXT: ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or_snan:
+; RISCV64-NOZFA: # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB23_2
+; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT: .LBB23_2: # %entry
+; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT: bnez a0, .LBB23_4
+; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT: .LBB23_4: # %entry
+; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: xori a0, a0, 1
+; RISCV64-NOZFA-NEXT: ret
+;
+; AARCH64-LABEL: f_ult_or_snan:
+; AARCH64: // %bb.0: // %entry
+; AARCH64-NEXT: fmin s0, s0, s1
+; AARCH64-NEXT: fcmp s0, s2
+; AARCH64-NEXT: cset w0, lt
+; AARCH64-NEXT: ret
+entry:
+ %cmp = fcmp ult float %a, %c
+ %cmp1 = fcmp ult float %b, %c
+ %0 = or i1 %cmp, %cmp1
+ ret i1 %0
+}
>From 6d6ca00431d0434aa6d32a54bd7cca07f45c7496 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Fri, 25 Apr 2025 19:42:05 +0800
Subject: [PATCH 2/2] Remove AArch64 test from riscv file
---
llvm/test/CodeGen/RISCV/fmaxmin-combine.ll | 299 ++-------------------
1 file changed, 24 insertions(+), 275 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
index 1da45ac0f2734..b0a8ad0b5aa0b 100644
--- a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
+++ b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=riscv64 --mattr=+f,+zfa < %s | FileCheck %s --check-prefix=RISCV64
; RUN: llc --mtriple=riscv64 --mattr=+f,-zfa < %s | FileCheck %s --check-prefix=RISCV64-NOZFA
-; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
define i1 @f_olt_or(float %a, float %b, float %c) {
; RISCV64-LABEL: f_olt_or:
@@ -15,14 +14,6 @@ define i1 @f_olt_or(float %a, float %b, float %c) {
; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_or:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w8, mi
-; AARCH64-NEXT: fcmp s1, s2
-; AARCH64-NEXT: csinc w0, w8, wzr, pl
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -42,13 +33,6 @@ define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_or_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, mi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -68,13 +52,6 @@ define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_or_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, mi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -96,13 +73,6 @@ define i1 @f_ugt_and(float %a, float %b, float %c) {
; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_and:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fcmp s1, s2
-; AARCH64-NEXT: fccmp s0, s2, #0, hi
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -124,13 +94,6 @@ define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_and_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -152,13 +115,6 @@ define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_and_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -175,27 +131,10 @@ define i1 @f_olt_and(float %a, float %b, float %c) {
;
; RISCV64-NOZFA-LABEL: f_olt_and:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB6_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB6_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB6_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB6_4: # %entry
-; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: flt.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT: flt.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_and:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, mi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -215,13 +154,6 @@ define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_and_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, mi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -238,27 +170,10 @@ define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
;
; RISCV64-NOZFA-LABEL: f_olt_and_snan:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB8_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB8_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB8_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB8_4: # %entry
-; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: flt.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT: flt.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_olt_and_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, mi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp olt float %a, %c
%cmp1 = fcmp olt float %b, %c
@@ -276,28 +191,11 @@ define i1 @f_ugt_or(float %a, float %b, float %c) {
;
; RISCV64-NOZFA-LABEL: f_ugt_or:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB9_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB9_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB9_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB9_4: # %entry
-; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: fle.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT: fle.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_or:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -319,13 +217,6 @@ define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_or_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -343,28 +234,11 @@ define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
;
; RISCV64-NOZFA-LABEL: f_ugt_or_snan:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB11_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB11_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB11_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB11_4: # %entry
-; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT: fle.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT: fle.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ugt_or_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmax s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, hi
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ugt float %a, %c
%cmp1 = fcmp ugt float %b, %c
@@ -384,14 +258,6 @@ define i1 @f_ogt_or(float %a, float %b, float %c) {
; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_or:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w8, gt
-; AARCH64-NEXT: fcmp s1, s2
-; AARCH64-NEXT: csinc w0, w8, wzr, le
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -411,13 +277,6 @@ define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_or_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, gt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -437,13 +296,6 @@ define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
; RISCV64-NOZFA-NEXT: fmax.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_or_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, gt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -465,13 +317,6 @@ define i1 @f_ult_and(float %a, float %b, float %c) {
; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_and:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fcmp s1, s2
-; AARCH64-NEXT: fccmp s0, s2, #0, lt
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c
@@ -493,13 +338,6 @@ define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_and_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c
@@ -521,13 +359,6 @@ define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_and_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c
@@ -544,27 +375,10 @@ define i1 @f_ogt_and(float %a, float %b, float %c) {
;
; RISCV64-NOZFA-LABEL: f_ogt_and:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB18_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB18_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB18_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB18_4: # %entry
-; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT: flt.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_and:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, gt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -584,13 +398,6 @@ define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa1
; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_and_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, gt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -607,27 +414,10 @@ define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
;
; RISCV64-NOZFA-LABEL: f_ogt_and_snan:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB20_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB20_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB20_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB20_4: # %entry
-; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: flt.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT: flt.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ogt_and_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, gt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ogt float %a, %c
%cmp1 = fcmp ogt float %b, %c
@@ -645,28 +435,11 @@ define i1 @f_ult_or(float %a, float %b, float %c) {
;
; RISCV64-NOZFA-LABEL: f_ult_or:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB21_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB21_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB21_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB21_4: # %entry
-; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT: fle.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_or:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c
@@ -688,13 +461,6 @@ define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_or_nan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c
@@ -712,28 +478,11 @@ define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
;
; RISCV64-NOZFA-LABEL: f_ult_or_snan:
; RISCV64-NOZFA: # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB23_2
-; RISCV64-NOZFA-NEXT: # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT: .LBB23_2: # %entry
-; RISCV64-NOZFA-NEXT: feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT: bnez a0, .LBB23_4
-; RISCV64-NOZFA-NEXT: # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT: fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT: .LBB23_4: # %entry
-; RISCV64-NOZFA-NEXT: fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT: fle.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT: fle.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT: and a0, a0, a1
; RISCV64-NOZFA-NEXT: xori a0, a0, 1
; RISCV64-NOZFA-NEXT: ret
-;
-; AARCH64-LABEL: f_ult_or_snan:
-; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmin s0, s0, s1
-; AARCH64-NEXT: fcmp s0, s2
-; AARCH64-NEXT: cset w0, lt
-; AARCH64-NEXT: ret
entry:
%cmp = fcmp ult float %a, %c
%cmp1 = fcmp ult float %b, %c