[llvm] DAGCombiner: Support fmaximum/fminimum and fmaximumnum/fminimumnum (PR #137318)

YunQiang Su via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 26 17:37:26 PDT 2025


https://github.com/wzssyqa updated https://github.com/llvm/llvm-project/pull/137318

>From 53ee33678134c47728b6a7d54e42780917eee817 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Fri, 18 Apr 2025 11:12:58 +0800
Subject: [PATCH 1/4] DAGCombiner: Support fmaximum/fminimum and
 fmaximumnum/fminimumnum

Some architectures, such as RISC-V, support the new fmax/fmin
instructions introduced by IEEE 754-2019.

We can use them in `getMinMaxOpcodeForFP`.
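
For example, the f_olt_or pattern from the new tests:

  %cmp = fcmp olt float %a, %c
  %cmp1 = fcmp olt float %b, %c
  %0 = or i1 %cmp, %cmp1

can now be folded into a single compare against a minimum. On RISC-V
the autogenerated checks below show this lowering to:

  fmin.s fa5, fa0, fa1
  flt.s  a0, fa5, fa2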
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 133 ++--
 llvm/test/CodeGen/AArch64/fmaxmin-combine.ll  | 340 ++++++++
 llvm/test/CodeGen/RISCV/fmaxmin-combine.ll    | 742 ++++++++++++++++++
 3 files changed, 1157 insertions(+), 58 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
 create mode 100644 llvm/test/CodeGen/RISCV/fmaxmin-combine.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 282dc4470238d..79f626bf88c1e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6333,60 +6333,70 @@ static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
   return DAG.isKnownNeverNaN(Operand2) && DAG.isKnownNeverNaN(Operand1);
 }
 
-// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
-static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
-                                     ISD::CondCode CC, unsigned OrAndOpcode,
-                                     SelectionDAG &DAG,
-                                     bool isFMAXNUMFMINNUM_IEEE,
-                                     bool isFMAXNUMFMINNUM) {
-  // The optimization cannot be applied for all the predicates because
-  // of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle
-  // NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be
-  // applied at all if one of the operands is a signaling NaN.
-
-  // It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
-  // are non NaN values.
-  if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
-      ((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND)))
-    return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
-                   isFMAXNUMFMINNUM_IEEE
-               ? ISD::FMINNUM_IEEE
-               : ISD::DELETED_NODE;
-  else if (((CC == ISD::SETGT || CC == ISD::SETGE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETLT || CC == ISD::SETLE) &&
-            (OrAndOpcode == ISD::AND)))
-    return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
-                   isFMAXNUMFMINNUM_IEEE
-               ? ISD::FMAXNUM_IEEE
-               : ISD::DELETED_NODE;
-  // Both FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle quiet
-  // NaNs in the same way. But, FMINNUM/FMAXNUM and FMINNUM_IEEE/
-  // FMAXNUM_IEEE handle signaling NaNs differently. If we cannot prove
-  // that there are not any sNaNs, then the optimization is not valid
-  // for FMINNUM_IEEE/FMAXNUM_IEEE. In the presence of sNaNs, we apply
-  // the optimization using FMINNUM/FMAXNUM for the following cases. If
-  // we can prove that we do not have any sNaNs, then we can do the
-  // optimization using FMINNUM_IEEE/FMAXNUM_IEEE for the following
-  // cases.
-  else if (((CC == ISD::SETOLT || CC == ISD::SETOLE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
-            (OrAndOpcode == ISD::AND)))
-    return isFMAXNUMFMINNUM ? ISD::FMINNUM
-                            : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-                                      isFMAXNUMFMINNUM_IEEE
-                                  ? ISD::FMINNUM_IEEE
-                                  : ISD::DELETED_NODE;
-  else if (((CC == ISD::SETOGT || CC == ISD::SETOGE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETULT || CC == ISD::SETULE) &&
-            (OrAndOpcode == ISD::AND)))
-    return isFMAXNUMFMINNUM ? ISD::FMAXNUM
-                            : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-                                      isFMAXNUMFMINNUM_IEEE
-                                  ? ISD::FMAXNUM_IEEE
-                                  : ISD::DELETED_NODE;
+static unsigned
+getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
+                     unsigned OrAndOpcode, SelectionDAG &DAG,
+                     bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM,
+                     bool isFMAXIMUMFMINIMUM, bool isFMAXIMUMNUMFMINIMUMNUM) {
+  bool isMax = true;
+  // SETLT/SETLE/SETGT/SETGE are undefined if either operand is NaN, so
+  // we treat them as SETOLT/SETOLE/SETOGT/SETOGE.
+  if (((CC == ISD::SETLT || CC == ISD::SETLE || CC == ISD::SETOLT ||
+        CC == ISD::SETOLE) &&
+       (OrAndOpcode == ISD::OR)) ||
+      ((CC == ISD::SETUGT || CC == ISD::SETUGE) && (OrAndOpcode == ISD::AND))) {
+    isMax = false;
+    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+        isFMAXNUMFMINNUM_IEEE)
+      return ISD::FMINNUM_IEEE;
+    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+      return ISD::FMINNUM;
+    if (isFMAXIMUMNUMFMINIMUMNUM)
+      return ISD::FMINIMUMNUM;
+  } else if (((CC == ISD::SETLT || CC == ISD::SETLE || CC == ISD::SETOLT ||
+               CC == ISD::SETOLE) &&
+              (OrAndOpcode == ISD::AND)) ||
+             ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
+              (OrAndOpcode == ISD::OR))) {
+    isMax = true;
+    if (isFMAXIMUMFMINIMUM)
+      return ISD::FMAXIMUM;
+  } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
+               CC == ISD::SETOGE) &&
+              (OrAndOpcode == ISD::OR)) ||
+             ((CC == ISD::SETULT || CC == ISD::SETULE) &&
+              (OrAndOpcode == ISD::AND))) {
+    isMax = true;
+    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+        isFMAXNUMFMINNUM_IEEE)
+      return ISD::FMAXNUM_IEEE;
+    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+      return ISD::FMAXNUM;
+    if (isFMAXIMUMNUMFMINIMUMNUM)
+      return ISD::FMAXIMUMNUM;
+  } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
+               CC == ISD::SETOGE) &&
+              (OrAndOpcode == ISD::AND)) ||
+             ((CC == ISD::SETULT || CC == ISD::SETULE) &&
+              (OrAndOpcode == ISD::OR))) {
+    isMax = false;
+    if (isFMAXIMUMFMINIMUM)
+      return ISD::FMINIMUM;
+  }
+  if (arebothOperandsNotNan(Operand1, Operand2, DAG)) {
+    // Keep this order to make unit testing easier:
+    //     AArch64 has FMAXNUM_IEEE but not FMAXIMUMNUM.
+    //     RISCV64 has FMAXIMUMNUM  but not FMAXNUM_IEEE.
+    //     Both have FMAXIMUM (RISCV64 gates it behind a feature switch).
+    if (isFMAXIMUMFMINIMUM)
+      return isMax ? ISD::FMAXIMUM : ISD::FMINIMUM;
+    if (isFMAXNUMFMINNUM_IEEE)
+      return isMax ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
+    if (isFMAXIMUMNUMFMINIMUMNUM)
+      return isMax ? ISD::FMAXIMUMNUM : ISD::FMINIMUMNUM;
+    if (isFMAXNUMFMINNUM)
+      return isMax ? ISD::FMAXNUM : ISD::FMINNUM;
+  }
   return ISD::DELETED_NODE;
 }
 
@@ -6433,14 +6443,20 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
   // predicate of one of the comparisons is the opposite of the other one.
   bool isFMAXNUMFMINNUM_IEEE = TLI.isOperationLegal(ISD::FMAXNUM_IEEE, OpVT) &&
                                TLI.isOperationLegal(ISD::FMINNUM_IEEE, OpVT);
-  bool isFMAXNUMFMINNUM = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, OpVT) &&
-                          TLI.isOperationLegalOrCustom(ISD::FMINNUM, OpVT);
+  bool isFMAXNUMFMINNUM = TLI.isOperationLegal(ISD::FMAXNUM, OpVT) &&
+                          TLI.isOperationLegal(ISD::FMINNUM, OpVT);
+  bool isFMAXIMUMFMINIMUM = TLI.isOperationLegal(ISD::FMAXIMUM, OpVT) &&
+                            TLI.isOperationLegal(ISD::FMINIMUM, OpVT);
+  bool isFMAXIMUMNUMFMINIMUMNUM =
+      TLI.isOperationLegal(ISD::FMAXIMUMNUM, OpVT) &&
+      TLI.isOperationLegal(ISD::FMINIMUMNUM, OpVT);
   if (((OpVT.isInteger() && TLI.isOperationLegal(ISD::UMAX, OpVT) &&
         TLI.isOperationLegal(ISD::SMAX, OpVT) &&
         TLI.isOperationLegal(ISD::UMIN, OpVT) &&
         TLI.isOperationLegal(ISD::SMIN, OpVT)) ||
        (OpVT.isFloatingPoint() &&
-        (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM))) &&
+        (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM || isFMAXIMUMFMINIMUM ||
+         isFMAXIMUMNUMFMINIMUMNUM))) &&
       !ISD::isIntEqualitySetCC(CCL) && !ISD::isFPEqualitySetCC(CCL) &&
       CCL != ISD::SETFALSE && CCL != ISD::SETO && CCL != ISD::SETUO &&
       CCL != ISD::SETTRUE &&
@@ -6496,7 +6512,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
       } else if (OpVT.isFloatingPoint())
         NewOpcode =
             getMinMaxOpcodeForFP(Operand1, Operand2, CC, LogicOp->getOpcode(),
-                                 DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
+                                 DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM,
+                                 isFMAXIMUMFMINIMUM, isFMAXIMUMNUMFMINIMUMNUM);
 
       if (NewOpcode != ISD::DELETED_NODE) {
         SDValue MinMaxValue =
diff --git a/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll b/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
new file mode 100644
index 0000000000000..671f47a46dc8a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
@@ -0,0 +1,340 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
+
+define i1 @f_olt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, mi
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, pl
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, hi
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, gt
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, le
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, lt
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
diff --git a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
new file mode 100644
index 0000000000000..1da45ac0f2734
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
@@ -0,0 +1,742 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=riscv64 --mattr=+f,+zfa < %s | FileCheck %s --check-prefix=RISCV64
+; RUN: llc --mtriple=riscv64 --mattr=+f,-zfa < %s | FileCheck %s --check-prefix=RISCV64-NOZFA
+; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
+
+define i1 @f_olt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_olt_or:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, mi
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, pl
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_olt_or_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_olt_or_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_or_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ugt_and:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, hi
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ugt_and_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ugt_and_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_and_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_olt_and:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB6_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB6_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB6_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB6_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_olt_and_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_olt_and_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_olt_and_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB8_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB8_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB8_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB8_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_olt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ugt_or:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB9_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB9_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB9_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB9_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ugt_or_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ugt_or_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmaxm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ugt_or_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB11_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB11_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB11_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB11_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ugt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ogt_or:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, gt
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, le
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ogt_or_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ogt_or_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_or_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ult_and:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, lt
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ult_and_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ult_and_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_and_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ogt_and:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB18_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB18_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB18_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB18_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ogt_and_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ogt_and_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ogt_and_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB20_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB20_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB20_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB20_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ogt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or(float %a, float %b, float %c) {
+; RISCV64-LABEL: f_ult_or:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB21_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB21_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB21_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB21_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; RISCV64-LABEL: f_ult_or_nan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or_nan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; RISCV64-LABEL: f_ult_or_snan:
+; RISCV64:       # %bb.0: # %entry
+; RISCV64-NEXT:    fminm.s fa5, fa0, fa1
+; RISCV64-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NEXT:    xori a0, a0, 1
+; RISCV64-NEXT:    ret
+;
+; RISCV64-NOZFA-LABEL: f_ult_or_snan:
+; RISCV64-NOZFA:       # %bb.0: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB23_2
+; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
+; RISCV64-NOZFA-NEXT:  .LBB23_2: # %entry
+; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
+; RISCV64-NOZFA-NEXT:    bnez a0, .LBB23_4
+; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
+; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
+; RISCV64-NOZFA-NEXT:  .LBB23_4: # %entry
+; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
+; RISCV64-NOZFA-NEXT:    ret
+;
+; AARCH64-LABEL: f_ult_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}

>From 02b669854bce25a2995525f2c0c7a6fac2f4a7db Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Fri, 25 Apr 2025 19:42:05 +0800
Subject: [PATCH 2/4] Remove AArch64 test from riscv file

---
 llvm/test/CodeGen/RISCV/fmaxmin-combine.ll | 299 ++-------------------
 1 file changed, 24 insertions(+), 275 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
index 1da45ac0f2734..b0a8ad0b5aa0b 100644
--- a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
+++ b/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc --mtriple=riscv64 --mattr=+f,+zfa < %s | FileCheck %s --check-prefix=RISCV64
 ; RUN: llc --mtriple=riscv64 --mattr=+f,-zfa < %s | FileCheck %s --check-prefix=RISCV64-NOZFA
-; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
 
 define i1 @f_olt_or(float %a, float %b, float %c) {
 ; RISCV64-LABEL: f_olt_or:
@@ -15,14 +14,6 @@ define i1 @f_olt_or(float %a, float %b, float %c) {
 ; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w8, mi
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    csinc w0, w8, wzr, pl
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -42,13 +33,6 @@ define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -68,13 +52,6 @@ define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
 ; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -96,13 +73,6 @@ define i1 @f_ugt_and(float %a, float %b, float %c) {
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    fccmp s0, s2, #0, hi
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -124,13 +94,6 @@ define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -152,13 +115,6 @@ define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -175,27 +131,10 @@ define i1 @f_olt_and(float %a, float %b, float %c) {
 ;
 ; RISCV64-NOZFA-LABEL: f_olt_and:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB6_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB6_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB6_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB6_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT:    flt.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -215,13 +154,6 @@ define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -238,27 +170,10 @@ define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
 ;
 ; RISCV64-NOZFA-LABEL: f_olt_and_snan:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB8_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB8_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB8_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB8_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    flt.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT:    flt.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_olt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp olt float %a, %c
   %cmp1 = fcmp olt float %b, %c
@@ -276,28 +191,11 @@ define i1 @f_ugt_or(float %a, float %b, float %c) {
 ;
 ; RISCV64-NOZFA-LABEL: f_ugt_or:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB9_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB9_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB9_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB9_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT:    fle.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -319,13 +217,6 @@ define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -343,28 +234,11 @@ define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
 ;
 ; RISCV64-NOZFA-LABEL: f_ugt_or_snan:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB11_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB11_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB11_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB11_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    fle.s a0, fa5, fa2
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa0, fa2
+; RISCV64-NOZFA-NEXT:    fle.s a1, fa1, fa2
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ugt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ugt float %a, %c
   %cmp1 = fcmp ugt float %b, %c
@@ -384,14 +258,6 @@ define i1 @f_ogt_or(float %a, float %b, float %c) {
 ; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w8, gt
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    csinc w0, w8, wzr, le
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -411,13 +277,6 @@ define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -437,13 +296,6 @@ define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
 ; RISCV64-NOZFA-NEXT:    fmax.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -465,13 +317,6 @@ define i1 @f_ult_and(float %a, float %b, float %c) {
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    fccmp s0, s2, #0, lt
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c
@@ -493,13 +338,6 @@ define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c
@@ -521,13 +359,6 @@ define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c
@@ -544,27 +375,10 @@ define i1 @f_ogt_and(float %a, float %b, float %c) {
 ;
 ; RISCV64-NOZFA-LABEL: f_ogt_and:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB18_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB18_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB18_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB18_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT:    flt.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -584,13 +398,6 @@ define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa1
 ; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -607,27 +414,10 @@ define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, fl
 ;
 ; RISCV64-NOZFA-LABEL: f_ogt_and_snan:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB20_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB20_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB20_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB20_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    flt.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT:    flt.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ogt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, %c
   %cmp1 = fcmp ogt float %b, %c
@@ -645,28 +435,11 @@ define i1 @f_ult_or(float %a, float %b, float %c) {
 ;
 ; RISCV64-NOZFA-LABEL: f_ult_or:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB21_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB21_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB21_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB21_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT:    fle.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c
@@ -688,13 +461,6 @@ define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float
 ; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c
@@ -712,28 +478,11 @@ define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, flo
 ;
 ; RISCV64-NOZFA-LABEL: f_ult_or_snan:
 ; RISCV64-NOZFA:       # %bb.0: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa0, fa0
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB23_2
-; RISCV64-NOZFA-NEXT:  # %bb.1: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa5, fa0
-; RISCV64-NOZFA-NEXT:  .LBB23_2: # %entry
-; RISCV64-NOZFA-NEXT:    feq.s a0, fa1, fa1
-; RISCV64-NOZFA-NEXT:    bnez a0, .LBB23_4
-; RISCV64-NOZFA-NEXT:  # %bb.3: # %entry
-; RISCV64-NOZFA-NEXT:    fmv.s fa0, fa1
-; RISCV64-NOZFA-NEXT:  .LBB23_4: # %entry
-; RISCV64-NOZFA-NEXT:    fmin.s fa5, fa0, fa5
-; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa5
+; RISCV64-NOZFA-NEXT:    fle.s a0, fa2, fa0
+; RISCV64-NOZFA-NEXT:    fle.s a1, fa2, fa1
+; RISCV64-NOZFA-NEXT:    and a0, a0, a1
 ; RISCV64-NOZFA-NEXT:    xori a0, a0, 1
 ; RISCV64-NOZFA-NEXT:    ret
-;
-; AARCH64-LABEL: f_ult_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
 entry:
   %cmp = fcmp ult float %a, %c
   %cmp1 = fcmp ult float %b, %c

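A note on the shape of the fold the tests above exercise: the combine
rewrites CMP(A,C) || CMP(B,C) into CMP(MINMAX(A,B), C), and its
validity hinges entirely on NaN behavior. A minimal scalar sketch in
plain C++ (illustrative only, not part of the patch; std::fmin stands
in for the NaN-ignoring minNum flavor):

#include <cassert>
#include <cmath>
#include <limits>

// (a < c) || (b < c) folds to fmin(a, b) < c. Under minNum-style
// semantics (std::fmin), a quiet NaN operand is simply ignored, so the
// folded form still agrees with the unfolded OR; the unsafe case is a
// signaling NaN, which minNum quiets to a qNaN that compares false
// against everything.
static bool orOfCmps(float a, float b, float c) { return a < c || b < c; }
static bool cmpOfMin(float a, float b, float c) { return std::fmin(a, b) < c; }

int main() {
  const float qnan = std::numeric_limits<float>::quiet_NaN();
  assert(orOfCmps(1.0f, 5.0f, 2.0f) && cmpOfMin(1.0f, 5.0f, 2.0f));   // both true
  assert(!orOfCmps(3.0f, 5.0f, 2.0f) && !cmpOfMin(3.0f, 5.0f, 2.0f)); // both false
  assert(orOfCmps(qnan, 1.0f, 2.0f) == cmpOfMin(qnan, 1.0f, 2.0f));   // qNaN agrees
  return 0;
}

That signaling-NaN hazard is why the isKnownNeverSNaN checks gate the
FMINNUM/FMINNUM_IEEE choices in the DAGCombiner change.
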
>From 1e258441bf67ab7f40d8456ce9cb80b646b69ae0 Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Sat, 26 Apr 2025 12:54:05 +0800
Subject: [PATCH 3/4] Update test cases

---
 .../AArch64/combine_andor_with_cmps.ll        | 401 +++++++++++++++---
 llvm/test/CodeGen/AArch64/fmaxmin-combine.ll  | 340 ---------------
 .../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 360 +++++++++++-----
 llvm/test/CodeGen/AMDGPU/or.ll                |  43 +-
 llvm/test/CodeGen/AMDGPU/xor3-i1-const.ll     |   5 +-
 ...-combine.ll => combine_andor_with_cmps.ll} |   0
 6 files changed, 610 insertions(+), 539 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
 rename llvm/test/CodeGen/RISCV/{fmaxmin-combine.ll => combine_andor_with_cmps.ll} (100%)

diff --git a/llvm/test/CodeGen/AArch64/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AArch64/combine_andor_with_cmps.ll
index 89cb25f3d9d75..1e7ab51709157 100644
--- a/llvm/test/CodeGen/AArch64/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AArch64/combine_andor_with_cmps.ll
@@ -1,71 +1,344 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-n1 -verify-machineinstrs < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
 
 ; The tests check the following optimization of DAGCombiner for AArch64:
 ; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
 ; CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C)
 
-define i1 @test1(float %arg1, float %arg2, float %arg3) #0 {
-; CHECK-LABEL: test1:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fminnm s0, s0, s1
-; CHECK-NEXT:    fcmp s0, s2
-; CHECK-NEXT:    cset w0, mi
-; CHECK-NEXT:    ret
-  %cmp1 = fcmp olt float %arg1, %arg3
-  %cmp2 = fcmp olt float %arg2, %arg3
-  %or1  = or i1 %cmp1, %cmp2
-  ret i1 %or1
-}
-
-define i1 @test2(double %arg1, double %arg2, double %arg3) #0 {
-; CHECK-LABEL: test2:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmaxnm d0, d0, d1
-; CHECK-NEXT:    fcmp d0, d2
-; CHECK-NEXT:    cset w0, gt
-; CHECK-NEXT:    ret
-  %cmp1 = fcmp ogt double %arg1, %arg3
-  %cmp2 = fcmp ogt double %arg2, %arg3
-  %or1  = or i1 %cmp1, %cmp2
-  ret i1 %or1
-}
-
-define i1 @test3(float %arg1, float %arg2, float %arg3) {
-; CHECK-LABEL: test3:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s3, #1.00000000
-; CHECK-NEXT:    fadd s0, s0, s3
-; CHECK-NEXT:    fmov s3, #2.00000000
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fmaxnm s0, s0, s1
-; CHECK-NEXT:    fcmp s0, s2
-; CHECK-NEXT:    cset w0, lt
-; CHECK-NEXT:    ret
-  %add1 = fadd nnan float %arg1, 1.0
-  %add2 = fadd nnan float %arg2, 2.0
-  %cmp1 = fcmp nnan olt float %add1, %arg3
-  %cmp2 = fcmp nnan olt float %add2, %arg3
-  %or1  = and i1 %cmp1, %cmp2
-  ret i1 %or1
-}
-
-define i1 @test4(float %arg1, float %arg2, float %arg3) {
-; CHECK-LABEL: test4:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s3, #1.00000000
-; CHECK-NEXT:    fadd s0, s0, s3
-; CHECK-NEXT:    fmov s3, #2.00000000
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fminnm s0, s0, s1
-; CHECK-NEXT:    fcmp s0, s2
-; CHECK-NEXT:    cset w0, gt
-; CHECK-NEXT:    ret
-  %add1 = fadd nnan float %arg1, 1.0
-  %add2 = fadd nnan float %arg2, 2.0
-  %cmp1 = fcmp nnan ogt float %add1, %arg3
-  %cmp2 = fcmp nnan ogt float %add2, %arg3
-  %or1  = and i1 %cmp1, %cmp2
-  ret i1 %or1
+define i1 @f_olt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, mi
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, pl
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
 }
 
+define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, hi
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fminnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_olt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_olt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_olt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, mi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp olt float %a, %c
+  %cmp1 = fcmp olt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ugt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ugt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ugt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmax s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, hi
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ugt float %a, %c
+  %cmp1 = fcmp ugt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w8, gt
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    csinc w0, w8, wzr, le
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fcmp s1, s2
+; AARCH64-NEXT:    fccmp s0, s2, #0, lt
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmaxnm s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ogt_and:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ogt_and_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ogt_and_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, gt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ogt float %a, %c
+  %cmp1 = fcmp ogt float %b, %c
+  %0 = and i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or(float %a, float %b, float %c) {
+; AARCH64-LABEL: f_ult_or:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
+; AARCH64-LABEL: f_ult_or_nan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
+
+define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
+; AARCH64-LABEL: f_ult_or_snan:
+; AARCH64:       // %bb.0: // %entry
+; AARCH64-NEXT:    fmin s0, s0, s1
+; AARCH64-NEXT:    fcmp s0, s2
+; AARCH64-NEXT:    cset w0, lt
+; AARCH64-NEXT:    ret
+entry:
+  %cmp = fcmp ult float %a, %c
+  %cmp1 = fcmp ult float %b, %c
+  %0 = or i1 %cmp, %cmp1
+  ret i1 %0
+}
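
Worth calling out in the checks above: fmax is the NaN-propagating
ISD::FMAXIMUM, so it is safe for the unordered ugt/or patterns even
with no NaN knowledge, while the NaN-ignoring fmaxnm only appears once
nofpclass excludes at least signaling NaNs. A standalone contrast in
C++ (illustrative only; propagatingMax is a hand-rolled stand-in, not
an LLVM API):

#include <cassert>
#include <cmath>
#include <limits>

// NaN-propagating max, modelling ISD::FMAXIMUM / AArch64 fmax.
static float propagatingMax(float a, float b) {
  if (std::isnan(a)) return a;
  if (std::isnan(b)) return b;
  return a > b ? a : b;
}

// fcmp ugt: unordered-or-greater-than.
static bool ugt(float a, float c) {
  return std::isnan(a) || std::isnan(c) || a > c;
}

int main() {
  const float qnan = std::numeric_limits<float>::quiet_NaN();
  // (a ugt c) || (b ugt c) is true here because a is NaN.
  assert(ugt(qnan, 2.0f) || ugt(1.0f, 2.0f));
  // FMAXIMUM keeps the NaN alive, so the folded compare agrees...
  assert(ugt(propagatingMax(qnan, 1.0f), 2.0f));
  // ...while a NaN-ignoring max (std::fmax, like fmaxnm) drops it and
  // would flip the result -- hence the nofpclass gating above.
  assert(!ugt(std::fmax(qnan, 1.0f), 2.0f));
  return 0;
}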
diff --git a/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll b/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
deleted file mode 100644
index 671f47a46dc8a..0000000000000
--- a/llvm/test/CodeGen/AArch64/fmaxmin-combine.ll
+++ /dev/null
@@ -1,340 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc --mtriple=aarch64 < %s | FileCheck %s --check-prefix=AARCH64
-
-define i1 @f_olt_or(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_olt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w8, mi
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    csinc w0, w8, wzr, pl
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_olt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_olt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_olt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_olt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_and(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ugt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    fccmp s0, s2, #0, hi
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ugt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ugt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fminnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_olt_and(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_olt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_olt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_olt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_olt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_olt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, mi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp olt float %a, %c
-  %cmp1 = fcmp olt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_or(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ugt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ugt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ugt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ugt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmax s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, hi
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ugt float %a, %c
-  %cmp1 = fcmp ugt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_or(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ogt_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w8, gt
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    csinc w0, w8, wzr, le
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ogt_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ogt_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_and(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ult_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fcmp s1, s2
-; AARCH64-NEXT:    fccmp s0, s2, #0, lt
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ult_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ult_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmaxnm s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_and(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ogt_and:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_and_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ogt_and_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ogt_and_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ogt_and_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, gt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ogt float %a, %c
-  %cmp1 = fcmp ogt float %b, %c
-  %0 = and i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_or(float %a, float %b, float %c) {
-; AARCH64-LABEL: f_ult_or:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_or_nan(float nofpclass(nan) %a, float nofpclass(nan) %b, float nofpclass(nan) %c) {
-; AARCH64-LABEL: f_ult_or_nan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
-
-define i1 @f_ult_or_snan(float nofpclass(snan) %a, float nofpclass(snan) %b, float nofpclass(snan) %c) {
-; AARCH64-LABEL: f_ult_or_snan:
-; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    fmin s0, s0, s1
-; AARCH64-NEXT:    fcmp s0, s2
-; AARCH64-NEXT:    cset w0, lt
-; AARCH64-NEXT:    ret
-entry:
-  %cmp = fcmp ult float %a, %c
-  %cmp1 = fcmp ult float %b, %c
-  %0 = or i1 %cmp, %cmp1
-  ret i1 %0
-}
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 9d679779fed0e..613319a329e7e 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -799,13 +799,30 @@ define i1 @test53(i32 %arg1, i32 %arg2, i32 %arg3) {
 }
 
 define i1 @test54(float %arg1, float %arg2, float %arg3) #0 {
-; GCN-LABEL: test54:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
-; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test54:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test54:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-TRUE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test54:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %arg3
   %cmp2 = fcmp olt float %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
@@ -813,13 +830,30 @@ define i1 @test54(float %arg1, float %arg2, float %arg3) #0 {
 }
 
 define i1 @test55(double %arg1, double %arg2, double %arg3) #0 {
-; GCN-LABEL: test55:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test55:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test55:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-TRUE16-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test55:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-FAKE16-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ole double %arg1, %arg3
   %cmp2 = fcmp ole double %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
@@ -827,13 +861,30 @@ define i1 @test55(double %arg1, double %arg2, double %arg3) #0 {
 }
 
 define i1 @test56(double %arg1, double %arg2, double %arg3) #0 {
-; GCN-LABEL: test56:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test56:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test56:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-TRUE16-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test56:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-FAKE16-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ogt double %arg1, %arg3
   %cmp2 = fcmp ogt double %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
@@ -841,13 +892,30 @@ define i1 @test56(double %arg1, double %arg2, double %arg3) #0 {
 }
 
 define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
-; GCN-LABEL: test57:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test57:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test57:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-TRUE16-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test57:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-FAKE16-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp oge float %arg1, %arg3
   %cmp2 = fcmp oge float %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
@@ -858,9 +926,10 @@ define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
 ; GFX11-LABEL: test58:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
 ; GFX11-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test58:
@@ -888,9 +957,10 @@ define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
 ; GFX11-LABEL: test59:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test59:
@@ -918,9 +988,10 @@ define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
 ; GFX11-LABEL: test60:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test60:
@@ -948,9 +1019,10 @@ define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
 ; GFX11-LABEL: test61:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
 ; GFX11-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test61:
@@ -1359,13 +1431,30 @@ define i1 @test77(double %arg1, double %arg2, double %arg3) {
 }
 
 define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
-; GCN-LABEL: test78:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
-; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test78:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test78:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-TRUE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test78:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %arg3
   %cmp2 = fcmp ogt float %arg3, %arg2
   %or1  = or i1 %cmp1, %cmp2
@@ -1376,9 +1465,10 @@ define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
 ; GFX11-LABEL: test79:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_nle_f32_e64 s0, v2, v1
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test79:
@@ -2259,13 +2349,32 @@ define i1 @test106(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %a
 }
 
 define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
-; GCN-LABEL: test107:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: test107:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v3
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v2, v3
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-TRUE16-LABEL: test107:
+; GCN-TRUE16:       ; %bb.0:
+; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-TRUE16-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-TRUE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-FAKE16-LABEL: test107:
+; GCN-FAKE16:       ; %bb.0:
+; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-FAKE16-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C
   %cmp2 = fcmp olt float %arg2, %C
   %cmp3 = fcmp olt float %arg3, %C
@@ -2278,9 +2387,12 @@ define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
 ; GFX11-LABEL: test108:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max3_f32 v0, v0, v1, v2
 ; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v3
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v1, v3
+; GFX11-NEXT:    v_cmp_nge_f32_e64 s1, v2, v3
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_and_b32 s0, s0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test108:
@@ -2310,12 +2422,13 @@ define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
 ; GFX11-LABEL: test109:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
 ; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v4
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s1, v2, v4
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s2, v3, v4
 ; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_or_b32 s1, s1, s2
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2378,13 +2491,20 @@ define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
 ; GFX11-LABEL: test111:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v3
-; GFX11-NEXT:    v_min3_f32 v0, v5, v6, v0
 ; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v2, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v3, v8
+; GFX11-NEXT:    s_or_b32 s3, vcc_lo, s0
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v4, v8
+; GFX11-NEXT:    s_or_b32 s2, s1, s2
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v5, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v6, v8
+; GFX11-NEXT:    s_or_b32 s2, s3, s2
+; GFX11-NEXT:    s_or_b32 s2, s2, vcc_lo
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
+; GFX11-NEXT:    s_or_b32 s0, s0, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-TRUE16-LABEL: test111:
@@ -2430,14 +2550,19 @@ define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
 ; GFX11-LABEL: test112:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v2, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v3, v8
+; GFX11-NEXT:    s_or_b32 s3, vcc_lo, s0
 ; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v4, v8
-; GFX11-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v3, v6, v6
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT:    v_min3_f32 v0, v0, v5, v3
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v0, v8
-; GFX11-NEXT:    s_or_b32 s0, s0, vcc_lo
+; GFX11-NEXT:    s_or_b32 s2, s1, s2
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v5, v8
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v6, v8
+; GFX11-NEXT:    s_or_b32 s2, s3, s2
+; GFX11-NEXT:    s_or_b32 s2, s2, vcc_lo
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
+; GFX11-NEXT:    s_or_b32 s0, s0, s2
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2484,11 +2609,11 @@ define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
 ; GFX11-LABEL: test113:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v0, v3
-; GFX11-NEXT:    s_or_b32 s0, s0, vcc_lo
+; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v3
+; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v1, v3
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v2, v3
+; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2519,11 +2644,11 @@ define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
 ; GFX11-LABEL: test114:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11-NEXT:    s_and_b32 s0, s0, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v0, v3
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v1, v3
+; GFX11-NEXT:    v_cmp_nge_f32_e64 s1, v2, v3
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_and_b32 s0, s0, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2558,12 +2683,13 @@ define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
 ; GFX11-LABEL: test115:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GFX11-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
 ; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v1, v4
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v4
+; GFX11-NEXT:    v_max_f32_e32 v2, v2, v3
 ; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    v_cmp_nge_f32_e64 s1, v2, v4
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2600,22 +2726,25 @@ define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
 ; GFX11-LABEL: test116:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
-; GFX11-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT:    v_min_f32_e32 v8, v8, v9
-; GFX11-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11-NEXT:    v_max_f32_e32 v4, v6, v7
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v8
-; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_or_b32 s1, s2, vcc_lo
+; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v10
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v10
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s1, v2, v10
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s2, v3, v10
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s3, v4, v10
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s4, v5, v10
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s5, v6, v10
+; GFX11-NEXT:    v_cmp_gt_f32_e64 s6, v7, v10
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s7, v8, v10
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s8, v9, v10
+; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_or_b32 s1, s1, s2
+; GFX11-NEXT:    s_or_b32 s2, s3, s4
+; GFX11-NEXT:    s_or_b32 s3, s5, s6
+; GFX11-NEXT:    s_or_b32 s4, s7, s8
 ; GFX11-NEXT:    s_or_b32 s0, s0, s1
+; GFX11-NEXT:    s_or_b32 s1, s2, s3
+; GFX11-NEXT:    s_or_b32 s0, s4, s0
+; GFX11-NEXT:    s_or_b32 s0, s1, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -2678,20 +2807,27 @@ define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
 ; GFX11-LABEL: test117:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_min_f32_e32 v6, v6, v7
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11-NEXT:    v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT:    v_min3_f32 v3, v4, v5, v6
 ; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11-NEXT:    v_min3_f32 v0, v8, v9, v1
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v12
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v2, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v3, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s3, v4, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s4, v5, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s5, v6, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s6, v7, v13
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s7, v8, v12
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s8, v9, v12
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s9, v10, v12
+; GFX11-NEXT:    v_cmp_lt_f32_e64 s10, v11, v12
 ; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT:    s_or_b32 s1, s1, s2
+; GFX11-NEXT:    s_or_b32 s2, s3, s4
+; GFX11-NEXT:    s_or_b32 s3, s5, s6
+; GFX11-NEXT:    s_or_b32 s4, s7, s8
+; GFX11-NEXT:    s_or_b32 s5, s9, s10
+; GFX11-NEXT:    s_or_b32 s0, s0, s1
+; GFX11-NEXT:    s_or_b32 s1, s2, s3
+; GFX11-NEXT:    s_or_b32 s2, s4, s5
 ; GFX11-NEXT:    s_or_b32 s0, s0, s1
 ; GFX11-NEXT:    s_or_b32 s0, s2, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll
index cc9650b9a7309..1e08c728e047e 100644
--- a/llvm/test/CodeGen/AMDGPU/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/or.ll
@@ -1175,12 +1175,11 @@ define amdgpu_kernel void @or_i1(ptr addrspace(1) %out, ptr addrspace(1) %in0, p
 ; GFX6-NEXT:    s_mov_b32 s4, s0
 ; GFX6-NEXT:    s_mov_b32 s5, s1
 ; GFX6-NEXT:    s_waitcnt vmcnt(1)
-; GFX6-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mul_f32_e32 v1, 1.0, v1
-; GFX6-NEXT:    v_max_f32_e32 v0, v1, v0
 ; GFX6-NEXT:    v_cmp_le_f32_e32 vcc, 0, v0
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_cmp_le_f32_e64 s[0:1], 0, v1
+; GFX6-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -1202,32 +1201,36 @@ define amdgpu_kernel void @or_i1(ptr addrspace(1) %out, ptr addrspace(1) %in0, p
 ; GFX8-NEXT:    s_mov_b32 s4, s0
 ; GFX8-NEXT:    s_mov_b32 s5, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_mul_f32_e32 v1, 1.0, v1
-; GFX8-NEXT:    v_max_f32_e32 v0, v1, v0
 ; GFX8-NEXT:    v_cmp_le_f32_e32 vcc, 0, v0
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_cmp_le_f32_e64 s[0:1], 0, v1
+; GFX8-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; GFX8-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX8-NEXT:    s_endpgm
 ;
 ; EG-LABEL: or_i1:
 ; EG:       ; %bb.0:
-; EG-NEXT:    ALU 1, @10, KC0[CB0:0-32], KC1[]
-; EG-NEXT:    TEX 1 @6
-; EG-NEXT:    ALU 4, @12, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    ALU 0, @12, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    TEX 0 @8
+; EG-NEXT:    ALU 0, @13, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    TEX 0 @10
+; EG-NEXT:    ALU 5, @14, KC0[CB0:0-32], KC1[]
 ; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
 ; EG-NEXT:    CF_END
 ; EG-NEXT:    PAD
-; EG-NEXT:    Fetch clause starting at 6:
-; EG-NEXT:     VTX_READ_32 T1.X, T1.X, 0, #1
+; EG-NEXT:    Fetch clause starting at 8:
 ; EG-NEXT:     VTX_READ_32 T0.X, T0.X, 0, #1
-; EG-NEXT:    ALU clause starting at 10:
-; EG-NEXT:     MOV T0.X, KC0[2].Z,
-; EG-NEXT:     MOV * T1.X, KC0[2].W,
+; EG-NEXT:    Fetch clause starting at 10:
+; EG-NEXT:     VTX_READ_32 T1.X, T1.X, 0, #1
 ; EG-NEXT:    ALU clause starting at 12:
-; EG-NEXT:     MAX_DX10 * T0.W, T0.X, T1.X,
-; EG-NEXT:     SETGE_DX10 * T0.W, PV.W, 0.0,
+; EG-NEXT:     MOV * T0.X, KC0[2].W,
+; EG-NEXT:    ALU clause starting at 13:
+; EG-NEXT:     MOV * T1.X, KC0[2].Z,
+; EG-NEXT:    ALU clause starting at 14:
+; EG-NEXT:     SETGE_DX10 T0.W, T0.X, 0.0,
+; EG-NEXT:     SETGE_DX10 * T1.W, T1.X, 0.0,
+; EG-NEXT:     OR_INT * T0.W, PS, PV.W,
 ; EG-NEXT:     AND_INT T0.X, PV.W, 1,
 ; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
diff --git a/llvm/test/CodeGen/AMDGPU/xor3-i1-const.ll b/llvm/test/CodeGen/AMDGPU/xor3-i1-const.ll
index a9f1dc4a7a9af..b890c1ab7da12 100644
--- a/llvm/test/CodeGen/AMDGPU/xor3-i1-const.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor3-i1-const.ll
@@ -6,10 +6,9 @@ define amdgpu_ps float @xor3_i1_const(float inreg %arg1, i32 inreg %arg2) {
 ; GCN-LABEL: xor3_i1_const:
 ; GCN:       ; %bb.0: ; %main_body
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0x42640000
-; GCN-NEXT:    v_cmp_lt_f32_e64 s[2:3], s0, 0
+; GCN-NEXT:    v_min_f32_e32 v0, 0, v0
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc, s0, v0
-; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 1.0, 0, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 1.0, 0, vcc
 ; GCN-NEXT:    ; return to shader part epilog
 main_body:
   %tmp26 = fcmp nsz olt float %arg1, 0.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/fmaxmin-combine.ll b/llvm/test/CodeGen/RISCV/combine_andor_with_cmps.ll
similarity index 100%
rename from llvm/test/CodeGen/RISCV/fmaxmin-combine.ll
rename to llvm/test/CodeGen/RISCV/combine_andor_with_cmps.ll

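For reference, the net effect of the selection logic touched by the
style patch below: when both operands are known non-NaN, the combiner
takes the first min/max node the target supports, in a fixed order so
that AArch64 and RISC-V each resolve deterministically. A condensed
sketch (a paraphrase with simplified names, not the exact LLVM code):

// Paraphrase of the "both operands known non-NaN" fallback in
// getMinMaxOpcodeForFP; names simplified for illustration.
enum class MaxNode { FMAXIMUM, FMAXNUM_IEEE, FMAXIMUMNUM, FMAXNUM, None };

static MaxNode pickMaxNode(bool hasMaximum, bool hasMaxNumIEEE,
                           bool hasMaximumNum, bool hasMaxNum) {
  // AArch64 has FMAXNUM_IEEE but not FMAXIMUMNUM; RISC-V has
  // FMAXIMUMNUM but not FMAXNUM_IEEE; both expose FMAXIMUM.
  if (hasMaximum)
    return MaxNode::FMAXIMUM;
  if (hasMaxNumIEEE)
    return MaxNode::FMAXNUM_IEEE;
  if (hasMaximumNum)
    return MaxNode::FMAXIMUMNUM;
  if (hasMaxNum)
    return MaxNode::FMAXNUM;
  return MaxNode::None;
}
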
>From b75f131d7adf9763d89aa0adf2798ce5123dd1ac Mon Sep 17 00:00:00 2001
From: YunQiang Su <yunqiang at isrc.iscas.ac.cn>
Date: Sat, 26 Apr 2025 16:24:29 +0800
Subject: [PATCH 4/4] Fix code style

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 64 +++++++++----------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 79f626bf88c1e..8a395a0967cf8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6323,12 +6323,12 @@ SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
   return SDValue();
 }
 
-static bool arebothOperandsNotSNan(SDValue Operand1, SDValue Operand2,
+static bool areBothOperandsNotSNan(SDValue Operand1, SDValue Operand2,
                                    SelectionDAG &DAG) {
   return DAG.isKnownNeverSNaN(Operand2) && DAG.isKnownNeverSNaN(Operand1);
 }
 
-static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
+static bool areBothOperandsNotNan(SDValue Operand1, SDValue Operand2,
                                   SelectionDAG &DAG) {
   return DAG.isKnownNeverNaN(Operand2) && DAG.isKnownNeverNaN(Operand1);
 }
@@ -6336,8 +6336,8 @@ static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
 static unsigned
 getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
                      unsigned OrAndOpcode, SelectionDAG &DAG,
-                     bool isFMAXNUMFMINNUM_IEEE, bool isFMAXNUMFMINNUM,
-                     bool isFMAXIMUMFMINIMUM, bool isFMAXIMUMNUMFMINIMUMNUM) {
+                     bool hasFMAXNUMFMINNUM_IEEE, bool hasFMAXNUMFMINNUM,
+                     bool hasFMAXIMUMFMINIMUM, bool hasFMAXIMUMNUMFMINIMUMNUM) {
   bool isMax = true;
   // SETLT/SETLE/SETGT/SETGE are undefined if any Operand is NaN. We
   // treat them as SETOLT/SETOLE/SETOGT/SETOGE.
@@ -6346,12 +6346,12 @@ getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
        (OrAndOpcode == ISD::OR)) ||
       ((CC == ISD::SETUGT || CC == ISD::SETUGE) && (OrAndOpcode == ISD::AND))) {
     isMax = false;
-    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-        isFMAXNUMFMINNUM_IEEE)
+    if (hasFMAXNUMFMINNUM_IEEE &&
+        areBothOperandsNotSNan(Operand1, Operand2, DAG))
       return ISD::FMINNUM_IEEE;
-    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+    if (hasFMAXNUMFMINNUM && areBothOperandsNotSNan(Operand1, Operand2, DAG))
       return ISD::FMINNUM;
-    if (isFMAXIMUMNUMFMINIMUMNUM)
+    if (hasFMAXIMUMNUMFMINIMUMNUM)
       return ISD::FMINIMUMNUM;
   } else if (((CC == ISD::SETLT || CC == ISD::SETLE || CC == ISD::SETOLT ||
                CC == ISD::SETOLE) &&
@@ -6359,7 +6359,7 @@ getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
              ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
               (OrAndOpcode == ISD::OR))) {
     isMax = true;
-    if (isFMAXIMUMFMINIMUM)
+    if (hasFMAXIMUMFMINIMUM)
       return ISD::FMAXIMUM;
   } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
                CC == ISD::SETOGE) &&
@@ -6367,12 +6367,12 @@ getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
              ((CC == ISD::SETULT || CC == ISD::SETULE) &&
               (OrAndOpcode == ISD::AND))) {
     isMax = true;
-    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-        isFMAXNUMFMINNUM_IEEE)
+    if (hasFMAXNUMFMINNUM_IEEE &&
+        areBothOperandsNotSNan(Operand1, Operand2, DAG))
       return ISD::FMAXNUM_IEEE;
-    if (arebothOperandsNotSNan(Operand1, Operand2, DAG) && isFMAXNUMFMINNUM)
+    if (hasFMAXNUMFMINNUM && areBothOperandsNotSNan(Operand1, Operand2, DAG))
       return ISD::FMAXNUM;
-    if (isFMAXIMUMNUMFMINIMUMNUM)
+    if (hasFMAXIMUMNUMFMINIMUMNUM)
       return ISD::FMAXIMUMNUM;
   } else if (((CC == ISD::SETGT || CC == ISD::SETGE || CC == ISD::SETOGT ||
                CC == ISD::SETOGE) &&
@@ -6380,21 +6380,21 @@ getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, ISD::CondCode CC,
              ((CC == ISD::SETULT || CC == ISD::SETULE) &&
               (OrAndOpcode == ISD::OR))) {
     isMax = false;
-    if (isFMAXIMUMFMINIMUM)
+    if (hasFMAXIMUMFMINIMUM)
       return ISD::FMINIMUM;
   }
-  if (arebothOperandsNotNan(Operand1, Operand2, DAG)) {
+  if (areBothOperandsNotNan(Operand1, Operand2, DAG)) {
     // Keep this order to make the unit tests easier:
     //     AArch64 has FMAXNUM_IEEE but not FMAXIMUMNUM
     //     RISCV64 has FMAXIMUMNUM  but not FMAXNUM_IEEE
     //     Both have FMAXIMUM (RISCV64 has a switch for it)
-    if (isFMAXIMUMFMINIMUM)
+    if (hasFMAXIMUMFMINIMUM)
       return isMax ? ISD::FMAXIMUM : ISD::FMINIMUM;
-    if (isFMAXNUMFMINNUM_IEEE)
+    if (hasFMAXNUMFMINNUM_IEEE)
       return isMax ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
-    if (isFMAXIMUMNUMFMINIMUMNUM)
+    if (hasFMAXIMUMNUMFMINIMUMNUM)
       return isMax ? ISD::FMAXIMUMNUM : ISD::FMINIMUMNUM;
-    if (isFMAXNUMFMINNUM)
+    if (hasFMAXNUMFMINNUM)
       return isMax ? ISD::FMAXNUM : ISD::FMINNUM;
   }
   return ISD::DELETED_NODE;
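
One property worth spelling out: the new FMAXIMUM/FMINIMUM paths need
no NaN proof at all, because those nodes propagate NaN and an ordered
compare with a NaN operand is false, so both sides of the equivalence
agree. A hedged IR illustration (function name hypothetical):

  ; (and (fcmp olt a, y), (fcmp olt b, y)) -> (fcmp olt (fmaximum a, b), y)
  ; If a or b is NaN, the AND is false, and llvm.maximum returns NaN,
  ; so the ordered compare on the folded form is false as well.
  define i1 @and_olt_common_rhs(float %a, float %b, float %y) {
    %c1 = fcmp olt float %a, %y
    %c2 = fcmp olt float %b, %y
    %r = and i1 %c1, %c2
    ret i1 %r
  }
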
@@ -6441,13 +6441,13 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
   // The optimization does not work for `==` or `!=`.
   // The two comparisons should either use the same predicate, or the
   // predicate of one should be the opposite of the other's.
-  bool isFMAXNUMFMINNUM_IEEE = TLI.isOperationLegal(ISD::FMAXNUM_IEEE, OpVT) &&
-                               TLI.isOperationLegal(ISD::FMINNUM_IEEE, OpVT);
-  bool isFMAXNUMFMINNUM = TLI.isOperationLegal(ISD::FMAXNUM, OpVT) &&
-                          TLI.isOperationLegal(ISD::FMINNUM, OpVT);
-  bool isFMAXIMUMFMINIMUM = TLI.isOperationLegal(ISD::FMAXIMUM, OpVT) &&
-                            TLI.isOperationLegal(ISD::FMINIMUM, OpVT);
-  bool isFMAXIMUMNUMFMINIMUMNUM =
+  bool hasFMAXNUMFMINNUM_IEEE = TLI.isOperationLegal(ISD::FMAXNUM_IEEE, OpVT) &&
+                                TLI.isOperationLegal(ISD::FMINNUM_IEEE, OpVT);
+  bool hasFMAXNUMFMINNUM = TLI.isOperationLegal(ISD::FMAXNUM, OpVT) &&
+                           TLI.isOperationLegal(ISD::FMINNUM, OpVT);
+  bool hasFMAXIMUMFMINIMUM = TLI.isOperationLegal(ISD::FMAXIMUM, OpVT) &&
+                             TLI.isOperationLegal(ISD::FMINIMUM, OpVT);
+  bool hasFMAXIMUMNUMFMINIMUMNUM =
       TLI.isOperationLegal(ISD::FMAXIMUMNUM, OpVT) &&
       TLI.isOperationLegal(ISD::FMINIMUMNUM, OpVT);
   if (((OpVT.isInteger() && TLI.isOperationLegal(ISD::UMAX, OpVT) &&
@@ -6455,8 +6455,8 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
         TLI.isOperationLegal(ISD::UMIN, OpVT) &&
         TLI.isOperationLegal(ISD::SMIN, OpVT)) ||
        (OpVT.isFloatingPoint() &&
-        (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM || isFMAXIMUMFMINIMUM ||
-         isFMAXIMUMNUMFMINIMUMNUM))) &&
+        (hasFMAXNUMFMINNUM_IEEE || hasFMAXNUMFMINNUM || hasFMAXIMUMFMINIMUM ||
+         hasFMAXIMUMNUMFMINIMUMNUM))) &&
       !ISD::isIntEqualitySetCC(CCL) && !ISD::isFPEqualitySetCC(CCL) &&
       CCL != ISD::SETFALSE && CCL != ISD::SETO && CCL != ISD::SETUO &&
       CCL != ISD::SETTRUE &&
@@ -6510,10 +6510,10 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
         else
           NewOpcode = IsSigned ? ISD::SMAX : ISD::UMAX;
       } else if (OpVT.isFloatingPoint())
-        NewOpcode =
-            getMinMaxOpcodeForFP(Operand1, Operand2, CC, LogicOp->getOpcode(),
-                                 DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM,
-                                 isFMAXIMUMFMINIMUM, isFMAXIMUMNUMFMINIMUMNUM);
+        NewOpcode = getMinMaxOpcodeForFP(
+            Operand1, Operand2, CC, LogicOp->getOpcode(), DAG,
+            hasFMAXNUMFMINNUM_IEEE, hasFMAXNUMFMINNUM, hasFMAXIMUMFMINIMUM,
+            hasFMAXIMUMNUMFMINIMUMNUM);
 
       if (NewOpcode != ISD::DELETED_NODE) {
         SDValue MinMaxValue =



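As a closing sketch of what the FMAXIMUMNUM/FMINIMUMNUM paths buy: on a
target where only those nodes are legal (RISC-V in this patch's tests),
the OR form can now combine even without proving the operands are not
signaling NaNs, since the IEEE754-2019 minimumNumber/maximumNumber
operations return the numeric operand for any NaN input. Hypothetical
IR, not copied from the tests:

  ; (or (fcmp olt a, y), (fcmp olt b, y)) -> (fcmp olt (fminimumnum a, b), y)
  ; If a is any NaN, the left compare is false and fminimumnum(a, b) is b,
  ; so both forms reduce to (fcmp olt b, y).
  define i1 @or_olt_common_rhs(float %a, float %b, float %y) {
    %c1 = fcmp olt float %a, %y
    %c2 = fcmp olt float %b, %y
    %r = or i1 %c1, %c2
    ret i1 %r
  }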