[llvm] [SelectionDAG] Remove `NoNaNsFPMath` in `visitFCmp` (PR #163519)

via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 29 04:29:45 PDT 2025


https://github.com/paperchalice updated https://github.com/llvm/llvm-project/pull/163519

From c5f8d91298616658c70410e568bf8ece1d2fa41f Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Wed, 15 Oct 2025 15:21:30 +0800
Subject: [PATCH 1/6] [SelectionDAG] Remove `NoNaNsFPMath` in `visitFCmp`

---
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   2 +-
 .../CodeGen/AArch64/build-vector-dup-simd.ll  | 251 ++++++++++--------
 2 files changed, 139 insertions(+), 114 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index dee090950ea8d..71fc917464844 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3757,7 +3757,7 @@ void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
 
   ISD::CondCode Condition = getFCmpCondCode(predicate);
   auto *FPMO = cast<FPMathOperator>(&I);
-  if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
+  if (FPMO->hasNoNaNs())
     Condition = getFCmpCodeWithoutNaN(Condition);
 
   SDNodeFlags Flags;
diff --git a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
index ac0b8e89519dd..f03ceddc685d2 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-dup-simd.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFULLFP16
-; RUN: llc < %s -mtriple=aarch64 --enable-no-nans-fp-math | FileCheck %s --check-prefixes=CHECK,CHECK-NONANS
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16  | FileCheck %s --check-prefixes=CHECK,CHECK-FULLFP16
 
 define <1 x float> @dup_v1i32_oeq(float %a, float %b) {
@@ -69,27 +68,13 @@ entry:
 }
 
 define <1 x float> @dup_v1i32_one(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_one:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_one:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_one:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_one:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s2, s0, s1
+; CHECK-NEXT:    fcmgt s0, s1, s0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp one float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -98,6 +83,20 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_one_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_one_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmeq s0, s0, s1
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan one float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
 define <1 x float> @dup_v1i32_ord(float %a, float %b) {
 ; CHECK-LABEL: dup_v1i32_ord:
 ; CHECK:       // %bb.0: // %entry
@@ -115,26 +114,13 @@ entry:
 }
 
 define <1 x float> @dup_v1i32_ueq(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmgt s2, s0, s1
-; CHECK-NOFULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NOFULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ueq:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ueq:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmgt s2, s0, s1
-; CHECK-FULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-FULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_ueq:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s2, s0, s1
+; CHECK-NEXT:    fcmgt s0, s1, s0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp ueq float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -143,23 +129,25 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_ueq_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ueq_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmeq s0, s0, s1
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan ueq float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
 define <1 x float> @dup_v1i32_ugt(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmge s0, s1, s0
-; CHECK-NOFULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ugt:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmgt s0, s0, s1
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ugt:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmge s0, s1, s0
-; CHECK-FULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_ugt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge s0, s1, s0
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp ugt float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -168,23 +156,25 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_ugt_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ugt_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s0, s0, s1
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan ugt float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
 define <1 x float> @dup_v1i32_uge(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-NOFULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_uge:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmge s0, s0, s1
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_uge:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmgt s0, s1, s0
-; CHECK-FULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_uge:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s0, s1, s0
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp uge float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -193,23 +183,26 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_uge_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_uge_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge s0, s0, s1
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan uge float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
+
 define <1 x float> @dup_v1i32_ult(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmge s0, s0, s1
-; CHECK-NOFULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ult:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmgt s0, s1, s0
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ult:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmge s0, s0, s1
-; CHECK-FULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_ult:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge s0, s0, s1
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp ult float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -218,23 +211,25 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_ult_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ult_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s0, s1, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan ult float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
 define <1 x float> @dup_v1i32_ule(float %a, float %b) {
-; CHECK-NOFULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-NOFULLFP16:       // %bb.0: // %entry
-; CHECK-NOFULLFP16-NEXT:    fcmgt s0, s0, s1
-; CHECK-NOFULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NOFULLFP16-NEXT:    ret
-;
-; CHECK-NONANS-LABEL: dup_v1i32_ule:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcmge s0, s1, s0
-; CHECK-NONANS-NEXT:    ret
-;
-; CHECK-FULLFP16-LABEL: dup_v1i32_ule:
-; CHECK-FULLFP16:       // %bb.0: // %entry
-; CHECK-FULLFP16-NEXT:    fcmgt s0, s0, s1
-; CHECK-FULLFP16-NEXT:    mvn v0.8b, v0.8b
-; CHECK-FULLFP16-NEXT:    ret
+; CHECK-LABEL: dup_v1i32_ule:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmgt s0, s0, s1
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
 entry:
   %0 = fcmp ule float %a, %b
   %vcmpd.i = sext i1 %0 to i32
@@ -243,6 +238,19 @@ entry:
   ret <1 x float> %1
 }
 
+define <1 x float> @dup_v1i32_ule_nnan(float %a, float %b) {
+; CHECK-LABEL: dup_v1i32_ule_nnan:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge s0, s1, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = fcmp nnan ule float %a, %b
+  %vcmpd.i = sext i1 %0 to i32
+  %vecinit.i = insertelement <1 x i32> poison, i32 %vcmpd.i, i64 0
+  %1 = bitcast <1 x i32> %vecinit.i to <1 x float>
+  ret <1 x float> %1
+}
+
 define <1 x float> @dup_v1i32_une(float %a, float %b) {
 ; CHECK-LABEL: dup_v1i32_une:
 ; CHECK:       // %bb.0: // %entry
@@ -326,13 +334,6 @@ define <8 x half> @dup_v8i16(half %a, half %b) {
 ; CHECK-NOFULLFP16-NEXT:    fcmeq s0, s0, s1
 ; CHECK-NOFULLFP16-NEXT:    ret
 ;
-; CHECK-NONANS-LABEL: dup_v8i16:
-; CHECK-NONANS:       // %bb.0: // %entry
-; CHECK-NONANS-NEXT:    fcvt s1, h1
-; CHECK-NONANS-NEXT:    fcvt s0, h0
-; CHECK-NONANS-NEXT:    fcmeq s0, s0, s1
-; CHECK-NONANS-NEXT:    ret
-;
 ; CHECK-FULLFP16-LABEL: dup_v8i16:
 ; CHECK-FULLFP16:       // %bb.0: // %entry
 ; CHECK-FULLFP16-NEXT:    fcmp h0, h1
@@ -350,6 +351,30 @@ define <8 x half> @dup_v8i16(half %a, half %b) {
   ret <8 x half> %1
 }
 
+define <8 x half> @dup_v8i16_nnan(half %a, half %b) {
+; FIXME: Could be replaced with fcmeq + dup, but the result type of the former
+; is promoted to i32 during selection, so the optimization does not apply.
+; CHECK-NOFULLFP16-LABEL: dup_v8i16_nnan:
+; CHECK-NOFULLFP16:       // %bb.0: // %entry
+; CHECK-NOFULLFP16-NEXT:    fcvt s1, h1
+; CHECK-NOFULLFP16-NEXT:    fcvt s0, h0
+; CHECK-NOFULLFP16-NEXT:    fcmeq s0, s0, s1
+; CHECK-NOFULLFP16-NEXT:    ret
+;
+; CHECK-FULLFP16-LABEL: dup_v8i16_nnan:
+; CHECK-FULLFP16:       // %bb.0: // %entry
+; CHECK-FULLFP16-NEXT:    fcmp h0, h1
+; CHECK-FULLFP16-NEXT:    csetm w8, eq
+; CHECK-FULLFP16-NEXT:    fmov s0, w8
+; CHECK-FULLFP16-NEXT:    ret
+entry:
+  %0 = fcmp nnan oeq half %a, %b
+  %vcmpd.i = sext i1 %0 to i16
+  %vecinit.i = insertelement <8 x i16> poison, i16 %vcmpd.i, i64 0
+  %1 = bitcast <8 x i16> %vecinit.i to <8 x half>
+  ret <8 x half> %1
+}
+
 ; Check that a mask is not generated for non-vectorized users.
 define i32 @mask_i32(float %a, float %b) {
 ; CHECK-LABEL: mask_i32:

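For reference, a minimal IR-level sketch of what this first commit changes (the function names are illustrative and not taken from the tests): after the patch, only the per-instruction `nnan` flag lets visitFCmp relax the condition code via getFCmpCodeWithoutNaN; the global -enable-no-nans-fp-math option no longer does.

  ; Ordinary 'one': the ordered/unordered distinction must be preserved.
  define i1 @cmp_one(float %a, float %b) {
    %c = fcmp one float %a, %b
    ret i1 %c
  }

  ; With nnan, SETONE may be relaxed to SETNE.
  define i1 @cmp_one_nnan(float %a, float %b) {
    %c = fcmp nnan one float %a, %b
    ret i1 %c
  }

This is the same contrast the dup_v1i32_one / dup_v1i32_one_nnan pair above checks: the nnan variant lowers to a single fcmeq + mvn on AArch64, while the plain one still needs fcmgt, fcmgt and orr.
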
From ebdd3851ea55b0dafb26c2a82dbcad195c486dee Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Thu, 16 Oct 2025 14:15:21 +0800
Subject: [PATCH 2/6] [SelectionDAG] also deduce nnan from users

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp  | 3 +++
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 6 ++++++
 2 files changed, 9 insertions(+)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8676060eb3db7..0799edabe7052 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6714,6 +6714,9 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
                                  DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM);
 
       if (NewOpcode != ISD::DELETED_NODE) {
+        // Propagate fast-math flags from setcc.
+        SelectionDAG::FlagInserter FlagInserter(DAG, LHS->getFlags() &
+                                                         RHS->getFlags());
         SDValue MinMaxValue =
             DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2);
         return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 379242ec5a157..9e8336e3508a0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5869,6 +5869,12 @@ bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN,
                            ? APInt::getAllOnes(VT.getVectorNumElements())
                            : APInt(1, 1);
 
+  // If all users of this operand are annotated with nnan, we can assume
+  // this operand is not NaN, since nnan also applies to the users' inputs.
+  if (llvm::all_of(Op->users(),
+                   [](const SDNode *N) { return N->getFlags().hasNoNaNs(); }))
+    return true;
+
   return isKnownNeverNaN(Op, DemandedElts, SNaN, Depth);
 }
 

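In other words, isKnownNeverNaN(Op) now also returns true when every user of Op carries the nnan flag. A hand-written IR sketch of that situation (not part of the test suite; how far the DAG combines take it still depends on the target):

  ; Every user of %a, %b and %c is an fcmp marked nnan, so the operands
  ; may be treated as never NaN, which lets combines such as
  ; (a < c) || (b < c) => min(a, b) < c pick the NaN-agnostic min form.
  define i1 @or_of_nnan_compares(float %a, float %b, float %c) {
    %cmp1 = fcmp nnan olt float %a, %c
    %cmp2 = fcmp nnan olt float %b, %c
    %r = or i1 %cmp1, %cmp2
    ret i1 %r
  }

Together with the flag propagation in the DAGCombiner hunk above, this is what the regenerated tests in the next commit exercise.
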
>From 43bf1aa02c3ba9e84980e4bb5eeb7362f0445262 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Thu, 16 Oct 2025 15:32:44 +0800
Subject: [PATCH 3/6] fix tests

---
 .../AArch64/neon-compare-instructions.ll      |  111 +-
 .../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 2316 ++++++++++-------
 llvm/test/CodeGen/Mips/fcmp.ll                |    6 +-
 llvm/test/CodeGen/PowerPC/change-no-infs.ll   |   67 -
 llvm/test/CodeGen/PowerPC/fsel.ll             |  195 +-
 llvm/test/CodeGen/PowerPC/scalar-equal.ll     |  112 +-
 llvm/test/CodeGen/PowerPC/scalar_cmp.ll       | 1494 ++++-------
 llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll       |  450 ++--
 llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll   |   16 +-
 llvm/test/CodeGen/X86/avx-minmax.ll           |   18 +-
 llvm/test/CodeGen/X86/sse-minmax-fast.ll      |  735 ++++++
 llvm/test/CodeGen/X86/sse-minmax-finite.ll    |  735 ++++++
 llvm/test/CodeGen/X86/sse-minmax.ll           | 1332 +++-------
 13 files changed, 4298 insertions(+), 3289 deletions(-)
 delete mode 100644 llvm/test/CodeGen/PowerPC/change-no-infs.ll
 create mode 100644 llvm/test/CodeGen/X86/sse-minmax-fast.ll
 create mode 100644 llvm/test/CodeGen/X86/sse-minmax-finite.ll

diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 11b3b62ec1c8d..a82ead2406945 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -3249,36 +3249,51 @@ define <2 x i64> @fcmone2xdouble_fast(<2 x double> %A, <2 x double> %B) {
 }
 
 define <2 x i32> @fcmord2xfloat_fast(<2 x float> %A, <2 x float> %B) {
-; CHECK-LABEL: fcmord2xfloat_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.2s, v0.2s, v1.2s
-; CHECK-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
-; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmord2xfloat_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmord2xfloat_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast ord <2 x float> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @fcmord4xfloat_fast(<4 x float> %A, <4 x float> %B) {
-; CHECK-LABEL: fcmord4xfloat_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmord4xfloat_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmord4xfloat_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast ord <4 x float> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @fcmord2xdouble_fast(<2 x double> %A, <2 x double> %B) {
-; CHECK-LABEL: fcmord2xdouble_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.2d, v0.2d, v1.2d
-; CHECK-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmord2xdouble_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.2d, v0.2d, v0.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmord2xdouble_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast ord <2 x double> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
@@ -3286,39 +3301,57 @@ define <2 x i64> @fcmord2xdouble_fast(<2 x double> %A, <2 x double> %B) {
 
 
 define <2 x i32> @fcmuno2xfloat_fast(<2 x float> %A, <2 x float> %B) {
-; CHECK-LABEL: fcmuno2xfloat_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.2s, v0.2s, v1.2s
-; CHECK-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
-; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
-; CHECK-NEXT:    mvn v0.8b, v0.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmuno2xfloat_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    mvn v0.8b, v0.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmuno2xfloat_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-GI-NEXT:    mvn v0.8b, v0.8b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast uno <2 x float> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @fcmuno4xfloat_fast(<4 x float> %A, <4 x float> %B) {
-; CHECK-LABEL: fcmuno4xfloat_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    mvn v0.16b, v0.16b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmuno4xfloat_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.4s, v0.4s, v0.4s
+; CHECK-SD-NEXT:    mvn v0.16b, v0.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmuno4xfloat_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast uno <4 x float> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @fcmuno2xdouble_fast(<2 x double> %A, <2 x double> %B) {
-; CHECK-LABEL: fcmuno2xdouble_fast:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcmge v2.2d, v0.2d, v1.2d
-; CHECK-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    mvn v0.16b, v0.16b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fcmuno2xdouble_fast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fcmeq v0.2d, v0.2d, v0.2d
+; CHECK-SD-NEXT:    mvn v0.16b, v0.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fcmuno2xdouble_fast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fcmge v2.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
+; CHECK-GI-NEXT:    ret
   %tmp3 = fcmp fast uno <2 x double> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index ec92edbe2bf65..70eac30c508b9 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -1,8 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-TRUE16
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck %s -check-prefixes=GCN,GFX11,GFX11-FAKE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-TRUE16,GFX11NONANS-TRUE16
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 -enable-no-nans-fp-math < %s | FileCheck %s -check-prefixes=GCN,GFX11NONANS,GCN-FAKE16,GFX11NONANS-FAKE16
 
 ; The tests check the following optimization of DAGCombiner:
 ; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
@@ -855,93 +853,117 @@ define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
 }
 
 define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test58:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test58:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test58:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ugt double %arg1, %arg3
   %cmp2 = fcmp ugt double %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test58_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test58_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ugt double %arg1, %arg3
+  %cmp2 = fcmp nnan ugt double %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test59:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test59:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test59:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp uge float %arg1, %arg3
   %cmp2 = fcmp uge float %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test59_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test59_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan uge float %arg1, %arg3
+  %cmp2 = fcmp nnan uge float %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test60:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test60:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test60:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ule float %arg1, %arg3
   %cmp2 = fcmp ule float %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test60_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test60_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ule float %arg1, %arg3
+  %cmp2 = fcmp nnan ule float %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test61:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test61:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test61:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult double %arg1, %arg3
   %cmp2 = fcmp ult double %arg2, %arg3
   %and1 = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test61_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test61_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult double %arg1, %arg3
+  %cmp2 = fcmp nnan ult double %arg2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test62(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test62:
 ; GCN:       ; %bb.0:
@@ -1083,22 +1105,14 @@ define i1 @test69(double %arg1, double %arg2, double %arg3) {
 }
 
 define i1 @test70(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test70:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test70:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test70:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp olt float %var1, %arg3
@@ -1107,6 +1121,22 @@ define i1 @test70(float %arg1, float %arg2, float %arg3) {
   ret i1 %or1
 }
 
+define i1 @test70_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test70_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan olt float %var1, %arg3
+  %cmp2 = fcmp nnan olt float %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test71(double %arg1, double %arg2, double %arg3) {
 ; GCN-LABEL: test71:
 ; GCN:       ; %bb.0:
@@ -1144,22 +1174,14 @@ define i1 @test72(double %arg1, double %arg2, double %arg3) {
 }
 
 define i1 @test73(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test73:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test73:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test73:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp oge float %var1, %arg3
@@ -1168,26 +1190,32 @@ define i1 @test73(float %arg1, float %arg2, float %arg3) {
   ret i1 %or1
 }
 
+define i1 @test73_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test73_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan oge float %var1, %arg3
+  %cmp2 = fcmp nnan oge float %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test74(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test74:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test74:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test74:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ugt double %var1, %arg3
@@ -1196,23 +1224,33 @@ define i1 @test74(double %arg1, double %arg2, double %arg3) {
   ret i1 %and1
 }
 
+define i1 @test74_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test74_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ugt double %var1, %arg3
+  %cmp2 = fcmp nnan ugt double %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test75(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test75:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test75:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test75:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp uge float %var1, %arg3
@@ -1221,23 +1259,31 @@ define i1 @test75(float %arg1, float %arg2, float %arg3) {
   ret i1 %and1
 }
 
+define i1 @test75_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test75_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan uge float %var1, %arg3
+  %cmp2 = fcmp nnan uge float %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test76(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test76:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test76:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test76:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp ule float %var1, %arg3
@@ -1246,26 +1292,32 @@ define i1 @test76(float %arg1, float %arg2, float %arg3) {
   ret i1 %and1
 }
 
+define i1 @test76_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test76_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan ule float %var1, %arg3
+  %cmp2 = fcmp nnan ule float %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test77(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test77:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test77:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test77:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ult double %var1, %arg3
@@ -1274,6 +1326,24 @@ define i1 @test77(double %arg1, double %arg2, double %arg3) {
   ret i1 %and1
 }
 
+define i1 @test77_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test77_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ult double %var1, %arg3
+  %cmp2 = fcmp nnan ult double %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test78:
 ; GCN:       ; %bb.0:
@@ -1289,27 +1359,33 @@ define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
 }
 
 define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test79:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test79:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test79:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult float %arg1, %arg3
   %cmp2 = fcmp ugt float %arg3, %arg2
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test79_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test79_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult float %arg1, %arg3
+  %cmp2 = fcmp nnan ugt float %arg3, %arg2
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test80(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test80:
 ; GCN:       ; %bb.0:
@@ -1364,22 +1440,14 @@ define i1 @test82(double %arg1, double %arg2, double %arg3) {
 }
 
 define i1 @test83(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test83:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test83:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test83:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp ule float %var1, %arg3
@@ -1388,6 +1456,23 @@ define i1 @test83(float %arg1, float %arg2, float %arg3) {
   ret i1 %and1
 }
 
+define i1 @test83_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test83_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan ule float %var1, %arg3
+  %cmp2 = fcmp nnan uge float %arg3, %var2
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test84(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test84:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1408,22 +1493,6 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test84:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test84:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call half @llvm.canonicalize.f16(half %arg1)
   %var2 = call half @llvm.canonicalize.f16(half %arg2)
   %cmp1 = fcmp olt half %var1, %arg3
@@ -1432,6 +1501,30 @@ define i1 @test84(half %arg1, half %arg2, half %arg3) {
   ret i1 %or1
 }
 
+define i1 @test84_nnan(half %arg1, half %arg2, half %arg3) {
+; GFX11-TRUE16-LABEL: test84_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test84_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp nnan olt half %var1, %arg3
+  %cmp2 = fcmp nnan olt half %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-TRUE16-LABEL: test85:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1458,28 +1551,6 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v3, v1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test85:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
-; GCN-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test85:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v3, v1
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
   %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
   %cmp1 = fcmp ole <2 x half> %var1, %arg3
@@ -1488,6 +1559,36 @@ define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
   ret <2 x i1> %or1
 }
 
+define <2 x i1> @test85_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GFX11-TRUE16-LABEL: test85_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
+; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.h, v2.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test85_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v3, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp nnan ole <2 x half> %var1, %arg3
+  %cmp2 = fcmp nnan ole <2 x half> %var2, %arg3
+  %or1  = or <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %or1
+}
+
 define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-TRUE16-LABEL: test86:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1514,28 +1615,6 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v3, v1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test86:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
-; GCN-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test86:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v3, v1
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
   %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
   %cmp1 = fcmp ogt <2 x half> %var1, %arg3
@@ -1544,15 +1623,45 @@ define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
   ret <2 x i1> %or1
 }
 
-define i1 @test87(half %arg1, half %arg2, half %arg3) {
-; GFX11-TRUE16-LABEL: test87:
+define <2 x i1> @test86_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GFX11-TRUE16-LABEL: test86_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
-; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test86_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp nnan ogt <2 x half> %var1, %arg3
+  %cmp2 = fcmp nnan ogt <2 x half> %var2, %arg3
+  %or1  = or <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %or1
+}
+
+define i1 @test87(half %arg1, half %arg2, half %arg3) {
+; GFX11-TRUE16-LABEL: test87:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
+; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: test87:
@@ -1564,22 +1673,6 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test87:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test87:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call half @llvm.canonicalize.f16(half %arg1)
   %var2 = call half @llvm.canonicalize.f16(half %arg2)
   %cmp1 = fcmp oge half %var1, %arg3
@@ -1588,6 +1681,30 @@ define i1 @test87(half %arg1, half %arg2, half %arg3) {
   ret i1 %or1
 }
 
+define i1 @test87_nnan(half %arg1, half %arg2, half %arg3) {
+; GFX11-TRUE16-LABEL: test87_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test87_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp nnan oge half %var1, %arg3
+  %cmp2 = fcmp nnan oge half %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-TRUE16-LABEL: test88:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1614,28 +1731,6 @@ define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_nle_f16_e32 vcc_lo, v3, v1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test88:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
-; GCN-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test88:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v3, v1
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
   %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
   %cmp1 = fcmp ugt <2 x half> %var1, %arg3
@@ -1644,6 +1739,36 @@ define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
   ret <2 x i1> %and1
 }
 
+define <2 x i1> @test88_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GFX11-TRUE16-LABEL: test88_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.h, v2.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test88_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v3, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp nnan ugt <2 x half> %var1, %arg3
+  %cmp2 = fcmp nnan ugt <2 x half> %var2, %arg3
+  %and1  = and <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %and1
+}
+
 define i1 @test89(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test89:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1664,22 +1789,6 @@ define i1 @test89(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_nlt_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test89:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test89:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call half @llvm.canonicalize.f16(half %arg1)
   %var2 = call half @llvm.canonicalize.f16(half %arg2)
   %cmp1 = fcmp uge half %var1, %arg3
@@ -1688,6 +1797,30 @@ define i1 @test89(half %arg1, half %arg2, half %arg3) {
   ret i1 %and1
 }
 
+define i1 @test89_nnan(half %arg1, half %arg2, half %arg3) {
+; GFX11-TRUE16-LABEL: test89_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test89_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp nnan uge half %var1, %arg3
+  %cmp2 = fcmp nnan uge half %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test90(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test90:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1708,22 +1841,6 @@ define i1 @test90(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_ngt_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test90:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
-; GCN-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test90:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call half @llvm.canonicalize.f16(half %arg1)
   %var2 = call half @llvm.canonicalize.f16(half %arg2)
   %cmp1 = fcmp ule half %var1, %arg3
@@ -1732,6 +1849,30 @@ define i1 @test90(half %arg1, half %arg2, half %arg3) {
   ret i1 %and1
 }
 
+define i1 @test90_nnan(half %arg1, half %arg2, half %arg3) {
+; GFX11-TRUE16-LABEL: test90_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test90_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call nnan half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp nnan ule half %var1, %arg3
+  %cmp2 = fcmp nnan ule half %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-TRUE16-LABEL: test91:
 ; GFX11-TRUE16:       ; %bb.0:
@@ -1758,28 +1899,6 @@ define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
 ; GFX11-FAKE16-NEXT:    v_cmp_nge_f16_e32 vcc_lo, v3, v1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-TRUE16-LABEL: test91:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
-; GCN-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test91:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GCN-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GCN-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v3, v1
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
   %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
   %cmp1 = fcmp ult <2 x half> %var1, %arg3
@@ -1788,6 +1907,36 @@ define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
   ret <2 x i1> %and1
 }
 
+define <2 x i1> @test91_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; GFX11-TRUE16-LABEL: test91_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.h, v2.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test91_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v3, v1
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call nnan <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp nnan ult <2 x half> %var1, %arg3
+  %cmp2 = fcmp nnan ult <2 x half> %var2, %arg3
+  %and1 = and <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %and1
+}
+
 define i1 @test92(i32 %arg1, i32 %arg2, i32 %arg3, i32 %C) {
 ; GCN-LABEL: test92:
 ; GCN:       ; %bb.0:
@@ -2175,21 +2324,13 @@ define i1 @test107(float %arg1, float %arg2, float %arg3, float %C) {
 }
 
 define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test108:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max3_f32 v0, v0, v1, v2
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v3
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test108:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test108:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult float %arg1, %C
   %cmp2 = fcmp ult float %arg2, %C
   %cmp3 = fcmp ult float %arg3, %C
@@ -2198,28 +2339,34 @@ define i1 @test108(float %arg1, float %arg2, float %arg3, float %C) {
   ret i1 %and2
 }
 
+define i1 @test108_nnan(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test108_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult float %arg1, %C
+  %cmp2 = fcmp nnan ult float %arg2, %C
+  %cmp3 = fcmp nnan ult float %arg3, %C
+  %and1 = and i1 %cmp1, %cmp2
+  %and2 = and i1 %and1, %cmp3
+  ret i1 %and2
+}
+
 define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test109:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test109:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
-; GFX11NONANS-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test109:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C
   %cmp2 = fcmp olt float %arg2, %C
   %cmp3 = fcmp ogt float %arg3, %C
@@ -2230,6 +2377,26 @@ define i1 @test109(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
   ret i1 %or3
 }
 
+define i1 @test109_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test109_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C
+  %cmp2 = fcmp nnan olt float %arg2, %C
+  %cmp3 = fcmp nnan ogt float %arg3, %C
+  %cmp4 = fcmp nnan ogt float %arg4, %C
+  %or1 = or i1 %cmp1, %cmp2
+  %or2 = or i1 %cmp3, %cmp4
+  %or3 = or i1 %or1, %or2
+  ret i1 %or3
+}
+
 define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
 ; GCN-LABEL: test110:
 ; GCN:       ; %bb.0:
@@ -2257,28 +2424,17 @@ define i1 @test110(float %arg1, float %arg2, float %arg3, float %arg4, float %C1
 }
 
 define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test111:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT:    v_min_f32_e32 v0, v0, v3
-; GFX11-NEXT:    v_min3_f32 v0, v5, v6, v0
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test111:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test111:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT:    v_dual_min_f32 v2, v2, v3 :: v_dual_max_f32 v3, v4, v4
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v3
+; GCN-NEXT:    v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C
   %cmp2 = fcmp olt float %arg2, %C
   %or1  = or i1 %cmp1, %cmp2
@@ -2297,31 +2453,49 @@ define i1 @test111(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
   ret i1 %or6
 }
 
+define i1 @test111_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test111_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v4
+; GCN-NEXT:    v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C
+  %cmp2 = fcmp nnan olt float %arg2, %C
+  %or1  = or i1 %cmp1, %cmp2
+  %cmp3 = fcmp nnan olt float %arg3, %C
+  %cmp4 = fcmp nnan olt float %arg4, %C
+  %or2  = or i1 %cmp3, %cmp4
+  %cmp5 = fcmp nnan olt float %arg5, %C
+  %or3 = or i1 %or1, %or2
+  %or4 = or i1 %or3, %cmp5
+  %cmp6 = fcmp nnan olt float %arg6, %C
+  %cmp7 = fcmp nnan olt float %arg7, %C
+  %or5 = or i1 %cmp6, %cmp7
+  %cmp8 = fcmp nnan olt float %arg8, %C
+  %or6 = or i1 %or5, %or4
+  %or7 = or i1 %or6, %cmp8
+  ret i1 %or6
+}
+
 define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
-; GFX11-LABEL: test112:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v4, v8
-; GFX11-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v3, v6, v6
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11-NEXT:    v_min3_f32 v0, v0, v5, v3
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v0, v8
-; GFX11-NEXT:    s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test112:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v4
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v5, v6, v0
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test112:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v4, v8
+; GCN-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_min_f32 v2, v2, v3
+; GCN-NEXT:    v_max_f32_e32 v3, v6, v6
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_min3_f32 v0, v0, v5, v3
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v0, v8
+; GCN-NEXT:    s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C
   %cmp2 = fcmp olt float %arg2, %C
   %or1  = or i1 %cmp1, %cmp2
@@ -2340,25 +2514,46 @@ define i1 @test112(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
   ret i1 %or6
 }
 
+define i1 @test112_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %C) {
+; GCN-LABEL: test112_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v4
+; GCN-NEXT:    v_min3_f32 v0, v5, v6, v0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C
+  %cmp2 = fcmp nnan olt float %arg2, %C
+  %or1  = or i1 %cmp1, %cmp2
+  %cmp3 = fcmp nnan olt float %arg3, %C
+  %cmp4 = fcmp nnan olt float %arg4, %C
+  %or2  = or i1 %cmp3, %cmp4
+  %cmp5 = fcmp nnan ult float %arg5, %C
+  %or3 = or i1 %or1, %or2
+  %or4 = or i1 %or3, %cmp5
+  %cmp6 = fcmp nnan olt float %arg6, %C
+  %cmp7 = fcmp nnan olt float %arg7, %C
+  %or5 = or i1 %cmp6, %cmp7
+  %cmp8 = fcmp nnan ult float %arg8, %C
+  %or6 = or i1 %or5, %or4
+  %or7 = or i1 %or6, %cmp8
+  ret i1 %or6
+}
+
 define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test113:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v0, v3
-; GFX11-NEXT:    s_or_b32 s0, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test113:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_maxmin_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test113:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_nge_f32_e64 s0, v0, v3
+; GCN-NEXT:    s_or_b32 s0, s0, vcc_lo
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult float %arg1, %C
   %cmp2 = fcmp ult float %arg2, %C
   %cmp3 = fcmp olt float %arg3, %C
@@ -2367,27 +2562,33 @@ define i1 @test113(float %arg1, float %arg2, float %arg3, float %C) {
   ret i1 %or1
 }
 
+define i1 @test113_nnan(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test113_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_maxmin_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult float %arg1, %C
+  %cmp2 = fcmp nnan ult float %arg2, %C
+  %cmp3 = fcmp nnan olt float %arg3, %C
+  %and1 = and i1 %cmp1, %cmp2
+  %or1 = or i1 %and1, %cmp3
+  ret i1 %or1
+}
+
 define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
-; GFX11-LABEL: test114:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v2, v3
-; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11-NEXT:    s_and_b32 s0, s0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test114:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GFX11NONANS-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
-; GFX11NONANS-NEXT:    s_and_b32 s0, s0, vcc_lo
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test114:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT:    s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ogt float %arg1, %C
   %cmp2 = fcmp ogt float %arg2, %C
   %cmp3 = fcmp ult float %arg3, %C
@@ -2396,27 +2597,36 @@ define i1 @test114(float %arg1, float %arg2, float %arg3, float %C) {
   ret i1 %or1
 }
 
+define i1 @test114_nnan(float %arg1, float %arg2, float %arg3, float %C) {
+; GCN-LABEL: test114_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
+; GCN-NEXT:    s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ogt float %arg1, %C
+  %cmp2 = fcmp nnan ogt float %arg2, %C
+  %cmp3 = fcmp nnan ult float %arg3, %C
+  %and1 = or i1 %cmp1, %cmp2
+  %or1 = and i1 %and1, %cmp3
+  ret i1 %or1
+}
+
 define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
-; GFX11-LABEL: test115:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11-NEXT:    v_cmp_nge_f32_e64 s0, v1, v4
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test115:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test115:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v3, v3, v3
+; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT:    v_cmp_nge_f32_e64 s0, v1, v4
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C
   %cmp2 = fcmp olt float %arg2, %C
   %var3 = call float @llvm.canonicalize.f32(float %arg3)
@@ -2429,49 +2639,53 @@ define i1 @test115(float %arg1, float %arg2, float %arg3, float %arg4, float %C)
   ret i1 %or2
 }
 
-define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
-; GFX11-LABEL: test116:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
-; GFX11-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT:    v_min_f32_e32 v8, v8, v9
-; GFX11-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11-NEXT:    v_max_f32_e32 v4, v6, v7
-; GFX11-NEXT:    v_min3_f32 v0, v0, v1, v8
-; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_or_b32 s1, s2, vcc_lo
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test116:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v8, v8, v9
-; GFX11NONANS-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GFX11NONANS-NEXT:    v_max_f32_e32 v4, v6, v7
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v0, v1, v8
-; GFX11NONANS-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
-; GFX11NONANS-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
-; GFX11NONANS-NEXT:    s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT:    s_or_b32 s1, s2, vcc_lo
-; GFX11NONANS-NEXT:    s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
-  %cmp1 = fcmp olt float %arg1, %C
-  %cmp2 = fcmp olt float %arg2, %C
-  %cmp3 = fcmp ogt float %arg3, %C
-  %cmp4 = fcmp ogt float %arg4, %C
+define i1 @test115_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %C) {
+; GCN-LABEL: test115_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v2, v2, v3
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C
+  %cmp2 = fcmp nnan olt float %arg2, %C
+  %var3 = call nnan float @llvm.canonicalize.f32(float %arg3)
+  %var4 = call nnan float @llvm.canonicalize.f32(float %arg4)
+  %cmp3 = fcmp nnan ult float %var3, %C
+  %cmp4 = fcmp nnan ult float %var4, %C
+  %or1 = or i1 %cmp1, %cmp2
+  %and1 = and i1 %cmp3, %cmp4
+  %or2 = or i1 %or1, %and1
+  ret i1 %or2
+}
+
+define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
+; GCN-LABEL: test116:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v9, v9, v9 :: v_dual_max_f32 v8, v8, v8
+; GCN-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT:    v_dual_max_f32 v5, v5, v5 :: v_dual_max_f32 v4, v4, v4
+; GCN-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT:    v_min_f32_e32 v8, v8, v9
+; GCN-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT:    v_max_f32_e32 v4, v6, v7
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp olt float %arg1, %C
+  %cmp2 = fcmp olt float %arg2, %C
+  %cmp3 = fcmp ogt float %arg3, %C
+  %cmp4 = fcmp ogt float %arg4, %C
   %cmp5 = fcmp olt float %arg5, %C
   %cmp6 = fcmp olt float %arg6, %C
   %cmp7 = fcmp ogt float %arg7, %C
@@ -2490,46 +2704,67 @@ define i1 @test116(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
   ret i1 %or9
 }
 
+define i1 @test116_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %C) {
+; GCN-LABEL: test116_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v8, v8, v9
+; GCN-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
+; GCN-NEXT:    v_max_f32_e32 v4, v6, v7
+; GCN-NEXT:    v_min3_f32 v0, v0, v1, v8
+; GCN-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C
+  %cmp2 = fcmp nnan olt float %arg2, %C
+  %cmp3 = fcmp nnan ogt float %arg3, %C
+  %cmp4 = fcmp nnan ogt float %arg4, %C
+  %cmp5 = fcmp nnan olt float %arg5, %C
+  %cmp6 = fcmp nnan olt float %arg6, %C
+  %cmp7 = fcmp nnan ogt float %arg7, %C
+  %cmp8 = fcmp nnan ogt float %arg8, %C
+  %cmp9 = fcmp nnan olt float %arg9, %C
+  %cmp10 = fcmp nnan olt float %arg10, %C
+  %or1 = or i1 %cmp1, %cmp2
+  %or2 = or i1 %cmp3, %cmp4
+  %or3 = or i1 %cmp5, %cmp6
+  %or4 = or i1 %cmp7, %cmp8
+  %or5 = or i1 %cmp9, %cmp10
+  %or6 = or i1 %or1, %or2
+  %or7 = or i1 %or3, %or4
+  %or8 = or i1 %or5, %or6
+  %or9 = or i1 %or7, %or8
+  ret i1 %or9
+}
+
 define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
-; GFX11-LABEL: test117:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
-; GFX11-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
-; GFX11-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; GFX11-NEXT:    v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
-; GFX11-NEXT:    v_min_f32_e32 v6, v6, v7
-; GFX11-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11-NEXT:    v_min_f32_e32 v2, v2, v3
-; GFX11-NEXT:    v_min3_f32 v3, v4, v5, v6
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11-NEXT:    v_min3_f32 v0, v8, v9, v1
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_or_b32 s0, s2, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test117:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v6, v6, v7
-; GFX11NONANS-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GFX11NONANS-NEXT:    v_min_f32_e32 v2, v2, v3
-; GFX11NONANS-NEXT:    v_min3_f32 v3, v4, v5, v6
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GFX11NONANS-NEXT:    v_min3_f32 v0, v8, v9, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
-; GFX11NONANS-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11NONANS-NEXT:    s_or_b32 s0, s0, s1
-; GFX11NONANS-NEXT:    s_or_b32 s0, s2, s0
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test117:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v3, v3, v3 :: v_dual_max_f32 v6, v6, v6
+; GCN-NEXT:    v_dual_max_f32 v7, v7, v7 :: v_dual_max_f32 v10, v10, v10
+; GCN-NEXT:    v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; GCN-NEXT:    v_dual_max_f32 v11, v11, v11 :: v_dual_max_f32 v2, v2, v2
+; GCN-NEXT:    v_min_f32_e32 v6, v6, v7
+; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
+; GCN-NEXT:    v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT:    v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s0, s2, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %C1
   %cmp2 = fcmp olt float %arg2, %C1
   %cmp3 = fcmp olt float %arg3, %C2
@@ -2556,6 +2791,50 @@ define i1 @test117(float %arg1, float %arg2, float %arg3, float %arg4, float %ar
   ret i1 %or11
 }
 
+define i1 @test117_nnan(float %arg1, float %arg2, float %arg3, float %arg4, float %arg5, float %arg6, float %arg7, float %arg8, float %arg9, float %arg10, float %arg11, float %arg12, float %C1, float %C2) {
+; GCN-LABEL: test117_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v6, v6, v7
+; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
+; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
+; GCN-NEXT:    v_min3_f32 v3, v4, v5, v6
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
+; GCN-NEXT:    v_min3_f32 v0, v8, v9, v1
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s0, s2, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %C1
+  %cmp2 = fcmp nnan olt float %arg2, %C1
+  %cmp3 = fcmp nnan olt float %arg3, %C2
+  %cmp4 = fcmp nnan olt float %arg4, %C2
+  %cmp5 = fcmp nnan olt float %arg5, %C2
+  %cmp6 = fcmp nnan olt float %arg6, %C2
+  %cmp7 = fcmp nnan olt float %arg7, %C2
+  %cmp8 = fcmp nnan olt float %arg8, %C2
+  %cmp9 = fcmp nnan olt float %arg9, %C1
+  %cmp10 = fcmp nnan olt float %arg10, %C1
+  %cmp11 = fcmp nnan olt float %arg11, %C1
+  %cmp12 = fcmp nnan olt float %arg12, %C1
+  %or1 = or i1 %cmp1, %cmp2
+  %or2 = or i1 %cmp3, %cmp4
+  %or3 = or i1 %cmp5, %cmp6
+  %or4 = or i1 %cmp7, %cmp8
+  %or5 = or i1 %cmp9, %cmp10
+  %or6 = or i1 %cmp11, %cmp12
+  %or7 = or i1 %or1, %or2
+  %or8 = or i1 %or3, %or4
+  %or9 = or i1 %or5, %or6
+  %or10 = or i1 %or7, %or8
+  %or11 = or i1 %or9, %or10
+  ret i1 %or11
+}
+
 
 define i1 @test118(float %arg1, float %arg2, float %arg3, float %arg4, float %C1, float %C2, float %C3, float %C4, float %C) #0 {
 ; GCN-LABEL: test118:
@@ -2665,8 +2944,8 @@ define i1 @test122(double %arg1, double %arg2, double %arg3) #1 {
 ; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %cmp1 = fcmp ult double %arg1, %arg3
-  %cmp2 = fcmp ult double %arg2, %arg3
+  %cmp1 = fcmp nnan ult double %arg1, %arg3
+  %cmp2 = fcmp nnan ult double %arg2, %arg3
   %or1 = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
@@ -2681,10 +2960,10 @@ define i1 @test123(double %arg1, double %arg2, double %arg3) #1 {
 ; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %var1 = call double @llvm.canonicalize.f64(double %arg1)
-  %var2 = call double @llvm.canonicalize.f64(double %arg2)
-  %cmp1 = fcmp ogt double %var1, %arg3
-  %cmp2 = fcmp ogt double %var2, %arg3
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ogt double %var1, %arg3
+  %cmp2 = fcmp nnan ogt double %var2, %arg3
   %or1 = and i1 %cmp1, %cmp2
  ret i1 %or1
 }
@@ -2814,24 +3093,30 @@ define i1 @test131(i16 %arg1, i32 %arg2) {
 ; GFX11-FAKE16-NEXT:    s_or_b32 s0, vcc_lo, s0
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i16 %arg1, 10
+  %cmp2 = icmp ult i32 %arg2, 10
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test131_nnan(i16 %arg1, i32 %arg2) {
+; GFX11-TRUE16-LABEL: test131_nnan:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 10, v1
+; GFX11-TRUE16-NEXT:    v_cmp_gt_u16_e64 s0, 10, v0.l
+; GFX11-TRUE16-NEXT:    s_or_b32 s0, s0, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GCN-TRUE16-LABEL: test131:
-; GCN-TRUE16:       ; %bb.0:
-; GCN-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-TRUE16-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 10, v1
-; GCN-TRUE16-NEXT:    v_cmp_gt_u16_e64 s0, 10, v0.l
-; GCN-TRUE16-NEXT:    s_or_b32 s0, s0, vcc_lo
-; GCN-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GCN-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-FAKE16-LABEL: test131:
-; GCN-FAKE16:       ; %bb.0:
-; GCN-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-FAKE16-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 10, v0
-; GCN-FAKE16-NEXT:    v_cmp_gt_u32_e64 s0, 10, v1
-; GCN-FAKE16-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GCN-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GCN-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-FAKE16-LABEL: test131_nnan:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 10, v0
+; GFX11-FAKE16-NEXT:    v_cmp_gt_u32_e64 s0, 10, v1
+; GFX11-FAKE16-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i16 %arg1, 10
   %cmp2 = icmp ult i32 %arg2, 10
   %or = or i1 %cmp1, %cmp2
@@ -2875,72 +3160,74 @@ define i1 @test133(i32 %arg1, i32 %arg2) {
 }
 
 define i1 @test134(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test134:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test134:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test134:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %arg3
   %cmp2 = fcmp ogt float %arg3, %arg2
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test134_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test134_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %arg3
+  %cmp2 = fcmp nnan ogt float %arg3, %arg2
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test135(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test135:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_nle_f32_e64 s0, v2, v1
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test135:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test135:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_nle_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult float %arg1, %arg3
   %cmp2 = fcmp ugt float %arg3, %arg2
   %or1  = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
 
+define i1 @test135_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test135_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult float %arg1, %arg3
+  %cmp2 = fcmp nnan ugt float %arg3, %arg2
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test136(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test136:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test136:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test136:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ole double %var1, %arg3
@@ -2949,24 +3236,34 @@ define i1 @test136(double %arg1, double %arg2, double %arg3) {
   ret i1 %and1
 }
 
+define i1 @test136_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test136_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ole double %var1, %arg3
+  %cmp2 = fcmp nnan oge double %arg3, %var2
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test137(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test137:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_nlt_f32_e64 s0, v2, v1
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test137:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test137:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_nlt_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp ule float %var1, %arg3
@@ -2975,208 +3272,264 @@ define i1 @test137(float %arg1, float %arg2, float %arg3) {
   ret i1 %or1
 }
 
+define i1 @test137_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test137_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan ule float %var1, %arg3
+  %cmp2 = fcmp nnan uge float %arg3, %var2
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test138(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test138:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test138:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test138:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp olt float %arg1, %arg3
   %cmp2 = fcmp olt float %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test138_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test138_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan olt float %arg1, %arg3
+  %cmp2 = fcmp nnan olt float %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test139(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test139:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test139:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test139:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ole double %arg1, %arg3
   %cmp2 = fcmp ole double %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test139_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test139_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ole double %arg1, %arg3
+  %cmp2 = fcmp nnan ole double %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test140(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test140:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test140:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test140:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ogt double %arg1, %arg3
   %cmp2 = fcmp ogt double %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test140_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test140_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ogt double %arg1, %arg3
+  %cmp2 = fcmp nnan ogt double %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test141(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test141:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test141:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test141:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp oge float %arg1, %arg3
   %cmp2 = fcmp oge float %arg2, %arg3
   %and1  = and i1 %cmp1, %cmp2
   ret i1 %and1
 }
 
+define i1 @test141_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test141_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan oge float %arg1, %arg3
+  %cmp2 = fcmp nnan oge float %arg2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test142(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test142:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test142:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test142:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ugt double %arg1, %arg3
   %cmp2 = fcmp ugt double %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
 
+define i1 @test142_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test142_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ugt double %arg1, %arg3
+  %cmp2 = fcmp nnan ugt double %arg2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test143(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test143:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test143:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test143:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp uge float %arg1, %arg3
   %cmp2 = fcmp uge float %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
 
+define i1 @test143_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test143_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan uge float %arg1, %arg3
+  %cmp2 = fcmp nnan uge float %arg2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test144(float %arg1, float %arg2, float %arg3) #0 {
-; GFX11-LABEL: test144:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test144:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test144:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ule float %arg1, %arg3
   %cmp2 = fcmp ule float %arg2, %arg3
   %or1  = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
 
+define i1 @test144_nnan(float %arg1, float %arg2, float %arg3) #0 {
+; GCN-LABEL: test144_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ule float %arg1, %arg3
+  %cmp2 = fcmp nnan ule float %arg2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test145(double %arg1, double %arg2, double %arg3) #0 {
-; GFX11-LABEL: test145:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test145:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test145:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp ult double %arg1, %arg3
   %cmp2 = fcmp ult double %arg2, %arg3
   %or1 = or i1 %cmp1, %cmp2
   ret i1 %or1
 }
 
+define i1 @test145_nnan(double %arg1, double %arg2, double %arg3) #0 {
+; GCN-LABEL: test145_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp nnan ult double %arg1, %arg3
+  %cmp2 = fcmp nnan ult double %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test146(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test146:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test146:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test146:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp olt float %var1, %arg3
@@ -3185,27 +3538,33 @@ define i1 @test146(float %arg1, float %arg2, float %arg3) {
   ret i1 %and1
 }
 
+define i1 @test146_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test146_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan olt float %var1, %arg3
+  %cmp2 = fcmp nnan olt float %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test147(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test147:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test147:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test147:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ole double %var1, %arg3
@@ -3214,27 +3573,35 @@ define i1 @test147(double %arg1, double %arg2, double %arg3) {
   ret i1 %and1
 }
 
+define i1 @test147_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test147_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ole double %var1, %arg3
+  %cmp2 = fcmp nnan ole double %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test148(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test148:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test148:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test148:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ogt double %var1, %arg3
@@ -3243,24 +3610,34 @@ define i1 @test148(double %arg1, double %arg2, double %arg3) {
   ret i1 %and1
 }
 
+define i1 @test148_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test148_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ogt double %var1, %arg3
+  %cmp2 = fcmp nnan ogt double %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test149(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test149:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_and_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test149:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test149:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp oge float %var1, %arg3
@@ -3269,27 +3646,33 @@ define i1 @test149(float %arg1, float %arg2, float %arg3) {
   ret i1 %and1
 }
 
+define i1 @test149_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test149_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan oge float %var1, %arg3
+  %cmp2 = fcmp nnan oge float %var2, %arg3
+  %and1  = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
 define i1 @test150(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test150:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test150:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test150:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ugt double %var1, %arg3
@@ -3298,24 +3681,34 @@ define i1 @test150(double %arg1, double %arg2, double %arg3) {
   ret i1 %or1
 }
 
+define i1 @test150_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test150_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ugt double %var1, %arg3
+  %cmp2 = fcmp nnan ugt double %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test151(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test151:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test151:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test151:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp uge float %var1, %arg3
@@ -3324,24 +3717,32 @@ define i1 @test151(float %arg1, float %arg2, float %arg3) {
   ret i1 %or1
 }
 
+define i1 @test151_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test151_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan uge float %var1, %arg3
+  %cmp2 = fcmp nnan uge float %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test152(float %arg1, float %arg2, float %arg3) {
-; GFX11-LABEL: test152:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GFX11-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
-; GFX11-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test152:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_min_f32_e32 v0, v0, v1
-; GFX11NONANS-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test152:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GCN-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
   %cmp1 = fcmp ule float %var1, %arg3
@@ -3350,27 +3751,33 @@ define i1 @test152(float %arg1, float %arg2, float %arg3) {
   ret i1 %or1
 }
 
+define i1 @test152_nnan(float %arg1, float %arg2, float %arg3) {
+; GCN-LABEL: test152_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call nnan float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp nnan ule float %var1, %arg3
+  %cmp2 = fcmp nnan ule float %var2, %arg3
+  %or1  = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 define i1 @test153(double %arg1, double %arg2, double %arg3) {
-; GFX11-LABEL: test153:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
-; GFX11-NEXT:    s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11NONANS-LABEL: test153:
-; GFX11NONANS:       ; %bb.0:
-; GFX11NONANS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11NONANS-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; GFX11NONANS-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
-; GFX11NONANS-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
-; GFX11NONANS-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11NONANS-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11NONANS-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: test153:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call double @llvm.canonicalize.f64(double %arg1)
   %var2 = call double @llvm.canonicalize.f64(double %arg2)
   %cmp1 = fcmp ult double %var1, %arg3
@@ -3379,6 +3786,24 @@ define i1 @test153(double %arg1, double %arg2, double %arg3) {
   ret i1 %or1
 }
 
+define i1 @test153_nnan(double %arg1, double %arg2, double %arg3) {
+; GCN-LABEL: test153_nnan:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GCN-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call nnan double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call nnan double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp nnan ult double %var1, %arg3
+  %cmp2 = fcmp nnan ult double %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
 declare double @llvm.canonicalize.f64(double)
 declare float @llvm.canonicalize.f32(float)
 declare half @llvm.canonicalize.f16(half)
@@ -3387,5 +3812,4 @@ declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
 attributes #0 = { nounwind "amdgpu-ieee"="false" }
 attributes #1 = { nounwind "no-nans-fp-math"="true" }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX11NONANS-FAKE16: {{.*}}
-; GFX11NONANS-TRUE16: {{.*}}
+; GFX11: {{.*}}
diff --git a/llvm/test/CodeGen/Mips/fcmp.ll b/llvm/test/CodeGen/Mips/fcmp.ll
index c0b34454d6206..c3bdcb52c0d6d 100644
--- a/llvm/test/CodeGen/Mips/fcmp.ll
+++ b/llvm/test/CodeGen/Mips/fcmp.ll
@@ -1073,7 +1073,7 @@ entry:
 ; MM32R6-DAG:    bnezc    $[[T5]],
 
   %add = fadd fast float %at, %angle
-  %cmp = fcmp ogt float %add, 1.000000e+00
+  %cmp = fcmp nnan ogt float %add, 1.000000e+00
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
@@ -1131,7 +1131,7 @@ entry:
 ; MM32R6-DAG:    bnezc    $[[T5]],
 
   %add = fadd fast double %at, %angle
-  %cmp = fcmp ogt double %add, 1.000000e+00
+  %cmp = fcmp nnan ogt double %add, 1.000000e+00
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
@@ -1143,4 +1143,4 @@ if.end:
   ret double %theta.0
 }
 
-attributes #0 = { nounwind readnone "no-nans-fp-math"="true" }
+attributes #0 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/PowerPC/change-no-infs.ll b/llvm/test/CodeGen/PowerPC/change-no-infs.ll
deleted file mode 100644
index 0cd5eb5408e3e..0000000000000
--- a/llvm/test/CodeGen/PowerPC/change-no-infs.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; Check that we can enable/disable NoInfsFPMath and NoNaNsInFPMath via function
-; attributes.  An attribute on one function should not magically apply to the
-; next one.
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN:   | FileCheck %s --check-prefix=CHECK --check-prefix=SAFE
-
-; RUN: llc < %s -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 -mattr=-vsx \
-; RUN:   -enable-no-infs-fp-math -enable-no-nans-fp-math \
-; RUN:   | FileCheck %s --check-prefix=CHECK --check-prefix=UNSAFE
-
-; The fcmp+select in these functions should be converted to a fsel instruction
-; when both NoInfsFPMath and NoNaNsInFPMath are enabled.
-
-; CHECK-LABEL: default0:
-define double @default0(double %a, double %y, double %z) {
-entry:
-; SAFE-NOT:  fsel
-; UNSAFE:    fsel
-  %cmp = fcmp ult double %a, 0.000000e+00
-  %z.y = select i1 %cmp, double %z, double %y
-  ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_off:
-define double @unsafe_math_off(double %a, double %y, double %z) #0 #2 {
-entry:
-; SAFE-NOT:   fsel
-; UNSAFE-NOT: fsel
-  %cmp = fcmp ult double %a, 0.000000e+00
-  %z.y = select i1 %cmp, double %z, double %y
-  ret double %z.y
-}
-
-; CHECK-LABEL: default1:
-define double @default1(double %a, double %y, double %z) {
-; SAFE-NOT:  fsel
-; UNSAFE:    fsel
-  %cmp = fcmp ult double %a, 0.000000e+00
-  %z.y = select i1 %cmp, double %z, double %y
-  ret double %z.y
-}
-
-; CHECK-LABEL: unsafe_math_on:
-define double @unsafe_math_on(double %a, double %y, double %z) #1 #3 {
-entry:
-; SAFE-NOT:   fsel
-; UNSAFE-NOT: fsel
-  %cmp = fcmp ult double %a, 0.000000e+00
-  %z.y = select i1 %cmp, double %z, double %y
-  ret double %z.y
-}
-
-; CHECK-LABEL: default2:
-define double @default2(double %a, double %y, double %z) {
-; SAFE-NOT:  fsel
-; UNSAFE:    fsel
-  %cmp = fcmp ult double %a, 0.000000e+00
-  %z.y = select i1 %cmp, double %z, double %y
-  ret double %z.y
-}
-
-attributes #0 = { "no-infs-fp-math"="false" }
-attributes #1 = { "no-nans-fp-math"="false" }
-
-attributes #2 = { "no-infs-fp-math"="false" }
-attributes #3 = { "no-infs-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/fsel.ll b/llvm/test/CodeGen/PowerPC/fsel.ll
index dea442d8404e1..e7e370d629d98 100644
--- a/llvm/test/CodeGen/PowerPC/fsel.ll
+++ b/llvm/test/CodeGen/PowerPC/fsel.ll
@@ -1,6 +1,5 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-vsx | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=-vsx | FileCheck -check-prefix=CHECK-FM %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math -mattr=+vsx | FileCheck -check-prefix=CHECK-FM-VSX %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx | FileCheck -check-prefix=CHECK-VSX %s
 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
 
@@ -13,14 +12,21 @@ entry:
 ; CHECK: @zerocmp1
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
+
+define double @zerocmp1_finite(double %a, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan ult double %a, 0.000000e+00
+  %z.y = select ninf nnan i1 %cmp, double %z, double %y
+  ret double %z.y
 
-; CHECK-FM: @zerocmp1
-; CHECK-FM: fsel 1, 1, 2, 3
-; CHECK-FM: blr
+; CHECK: @zerocmp1_finite
+; CHECK: fsel 1, 1, 2, 3
+; CHECK: blr
 
-; CHECK-FM-VSX: @zerocmp1
-; CHECK-FM-VSX: fsel 1, 1, 2, 3
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @zerocmp1_finite
+; CHECK-VSX: fsel 1, 1, 2, 3
+; CHECK-VSX: blr
 }
 
 define double @zerocmp2(double %a, double %y, double %z) #0 {
@@ -32,16 +38,23 @@ entry:
 ; CHECK: @zerocmp2
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @zerocmp2
-; CHECK-FM: fneg [[REG:[0-9]+]], 1
-; CHECK-FM: fsel 1, [[REG]], 3, 2
-; CHECK-FM: blr
+define double @zerocmp2_finite(double %a, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan ogt double %a, 0.000000e+00
+  %y.z = select ninf nnan i1 %cmp, double %y, double %z
+  ret double %y.z
 
-; CHECK-FM-VSX: @zerocmp2
-; CHECK-FM-VSX: xsnegdp [[REG:[0-9]+]], 1
-; CHECK-FM-VSX: fsel 1, [[REG]], 3, 2
-; CHECK-FM-VSX: blr
+; CHECK: @zerocmp2_finite
+; CHECK: fneg [[REG:[0-9]+]], 1
+; CHECK: fsel 1, [[REG]], 3, 2
+; CHECK: blr
+
+; CHECK-VSX: @zerocmp2_finite
+; CHECK-VSX: xsnegdp [[REG:[0-9]+]], 1
+; CHECK-VSX: fsel 1, [[REG]], 3, 2
+; CHECK-VSX: blr
 }
 
 define double @zerocmp3(double %a, double %y, double %z) #0 {
@@ -53,18 +66,25 @@ entry:
 ; CHECK: @zerocmp3
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @zerocmp3
-; CHECK-FM: fsel [[REG:[0-9]+]], 1, 2, 3
-; CHECK-FM: fneg [[REG2:[0-9]+]], 1
-; CHECK-FM: fsel 1, [[REG2]], [[REG]], 3
-; CHECK-FM: blr
+define double @zerocmp3_finite(double %a, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan oeq double %a, 0.000000e+00
+  %y.z = select ninf nnan i1 %cmp, double %y, double %z
+  ret double %y.z
+
+; CHECK: @zerocmp3_finite
+; CHECK: fsel [[REG:[0-9]+]], 1, 2, 3
+; CHECK: fneg [[REG2:[0-9]+]], 1
+; CHECK: fsel 1, [[REG2]], [[REG]], 3
+; CHECK: blr
 
-; CHECK-FM-VSX: @zerocmp3
-; CHECK-FM-VSX: fsel [[REG:[0-9]+]], 1, 2, 3
-; CHECK-FM-VSX: xsnegdp [[REG2:[0-9]+]], 1
-; CHECK-FM-VSX: fsel 1, [[REG2]], [[REG]], 3
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @zerocmp3_finite
+; CHECK-VSX: fsel [[REG:[0-9]+]], 1, 2, 3
+; CHECK-VSX: xsnegdp [[REG2:[0-9]+]], 1
+; CHECK-VSX: fsel 1, [[REG2]], [[REG]], 3
+; CHECK-VSX: blr
 }
 
 define double @min1(double %a, double %b) #0 {
@@ -76,16 +96,23 @@ entry:
 ; CHECK: @min1
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @min1
-; CHECK-FM: fsub [[REG:[0-9]+]], 2, 1
-; CHECK-FM: fsel 1, [[REG]], 1, 2
-; CHECK-FM: blr
+define double @min1_finite(double %a, double %b) #0 {
+entry:
+  %cmp = fcmp ninf nnan ole double %a, %b
+  %cond = select ninf nnan i1 %cmp, double %a, double %b
+  ret double %cond
+
+; CHECK: @min1_finite
+; CHECK: fsub [[REG:[0-9]+]], 2, 1
+; CHECK: fsel 1, [[REG]], 1, 2
+; CHECK: blr
 
-; CHECK-FM-VSX: @min1
-; CHECK-FM-VSX: xssubdp [[REG:[0-9]+]], 2, 1
-; CHECK-FM-VSX: fsel 1, [[REG]], 1, 2
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @min1_finite
+; CHECK-VSX: xssubdp [[REG:[0-9]+]], 2, 1
+; CHECK-VSX: fsel 1, [[REG]], 1, 2
+; CHECK-VSX: blr
 }
 
 define double @max1(double %a, double %b) #0 {
@@ -97,16 +124,23 @@ entry:
 ; CHECK: @max1
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @max1
-; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
-; CHECK-FM: fsel 1, [[REG]], 1, 2
-; CHECK-FM: blr
+define double @max1_finite(double %a, double %b) #0 {
+entry:
+  %cmp = fcmp ninf nnan oge double %a, %b
+  %cond = select ninf nnan i1 %cmp, double %a, double %b
+  ret double %cond
+
+; CHECK: @max1_finite
+; CHECK: fsub [[REG:[0-9]+]], 1, 2
+; CHECK: fsel 1, [[REG]], 1, 2
+; CHECK: blr
 
-; CHECK-FM-VSX: @max1
-; CHECK-FM-VSX: xssubdp [[REG:[0-9]+]], 1, 2
-; CHECK-FM-VSX: fsel 1, [[REG]], 1, 2
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @max1_finite
+; CHECK-VSX: xssubdp [[REG:[0-9]+]], 1, 2
+; CHECK-VSX: fsel 1, [[REG]], 1, 2
+; CHECK-VSX: blr
 }
 
 define double @cmp1(double %a, double %b, double %y, double %z) #0 {
@@ -118,16 +152,23 @@ entry:
 ; CHECK: @cmp1
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @cmp1
-; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
-; CHECK-FM: fsel 1, [[REG]], 3, 4
-; CHECK-FM: blr
+define double @cmp1_finite(double %a, double %b, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan ult double %a, %b
+  %z.y = select ninf nnan i1 %cmp, double %z, double %y
+  ret double %z.y
+
+; CHECK: @cmp1_finite
+; CHECK: fsub [[REG:[0-9]+]], 1, 2
+; CHECK: fsel 1, [[REG]], 3, 4
+; CHECK: blr
 
-; CHECK-FM-VSX: @cmp1
-; CHECK-FM-VSX: xssubdp [[REG:[0-9]+]], 1, 2
-; CHECK-FM-VSX: fsel 1, [[REG]], 3, 4
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @cmp1_finite
+; CHECK-VSX: xssubdp [[REG:[0-9]+]], 1, 2
+; CHECK-VSX: fsel 1, [[REG]], 3, 4
+; CHECK-VSX: blr
 }
 
 define double @cmp2(double %a, double %b, double %y, double %z) #0 {
@@ -139,16 +180,23 @@ entry:
 ; CHECK: @cmp2
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @cmp2
-; CHECK-FM: fsub [[REG:[0-9]+]], 2, 1
-; CHECK-FM: fsel 1, [[REG]], 4, 3
-; CHECK-FM: blr
+define double @cmp2_finite(double %a, double %b, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan ogt double %a, %b
+  %y.z = select ninf nnan i1 %cmp, double %y, double %z
+  ret double %y.z
+
+; CHECK: @cmp2_finite
+; CHECK: fsub [[REG:[0-9]+]], 2, 1
+; CHECK: fsel 1, [[REG]], 4, 3
+; CHECK: blr
 
-; CHECK-FM-VSX: @cmp2
-; CHECK-FM-VSX: xssubdp [[REG:[0-9]+]], 2, 1
-; CHECK-FM-VSX: fsel 1, [[REG]], 4, 3
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @cmp2_finite
+; CHECK-VSX: xssubdp [[REG:[0-9]+]], 2, 1
+; CHECK-VSX: fsel 1, [[REG]], 4, 3
+; CHECK-VSX: blr
 }
 
 define double @cmp3(double %a, double %b, double %y, double %z) #0 {
@@ -160,20 +208,27 @@ entry:
 ; CHECK: @cmp3
 ; CHECK-NOT: fsel
 ; CHECK: blr
+}
 
-; CHECK-FM: @cmp3
-; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
-; CHECK-FM: fsel [[REG2:[0-9]+]], [[REG]], 3, 4
-; CHECK-FM: fneg [[REG3:[0-9]+]], [[REG]]
-; CHECK-FM: fsel 1, [[REG3]], [[REG2]], 4
-; CHECK-FM: blr
+define double @cmp3_finite(double %a, double %b, double %y, double %z) #0 {
+entry:
+  %cmp = fcmp ninf nnan oeq double %a, %b
+  %y.z = select ninf nnan i1 %cmp, double %y, double %z
+  ret double %y.z
+
+; CHECK: @cmp3_finite
+; CHECK: fsub [[REG:[0-9]+]], 1, 2
+; CHECK: fsel [[REG2:[0-9]+]], [[REG]], 3, 4
+; CHECK: fneg [[REG3:[0-9]+]], [[REG]]
+; CHECK: fsel 1, [[REG3]], [[REG2]], 4
+; CHECK: blr
 
-; CHECK-FM-VSX: @cmp3
-; CHECK-FM-VSX: xssubdp [[REG:[0-9]+]], 1, 2
-; CHECK-FM-VSX: fsel [[REG2:[0-9]+]], [[REG]], 3, 4
-; CHECK-FM-VSX: xsnegdp [[REG3:[0-9]+]], [[REG]]
-; CHECK-FM-VSX: fsel 1, [[REG3]], [[REG2]], 4
-; CHECK-FM-VSX: blr
+; CHECK-VSX: @cmp3_finite
+; CHECK-VSX: xssubdp [[REG:[0-9]+]], 1, 2
+; CHECK-VSX: fsel [[REG2:[0-9]+]], [[REG]], 3, 4
+; CHECK-VSX: xsnegdp [[REG3:[0-9]+]], [[REG]]
+; CHECK-VSX: fsel 1, [[REG3]], [[REG2]], 4
+; CHECK-VSX: blr
 }
 
 attributes #0 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
index c0b11b47236a9..18c41fc25def2 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
@@ -1,57 +1,31 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN:   -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN:   --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
-; RUN:   -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN:   --enable-no-nans-fp-math --enable-no-infs-fp-math \
+; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=FAST-P9
+; RUN:   --check-prefix=CHECK-P8
 ; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=NO-FAST-P9
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
-; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=NO-FAST-P8
+; RUN:   --check-prefix=CHECK-P9
 
 define double @testoeq(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
+; CHECK-P8-LABEL: testoeq:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P8-NEXT:    beq cr0, .LBB0_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB0_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
 ;
-; FAST-P9-LABEL: testoeq:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: testoeq:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT:    beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB0_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: testoeq:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT:    beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB0_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
+; CHECK-P9-LABEL: testoeq:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P9-NEXT:    beq cr0, .LBB0_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB0_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oeq double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -59,37 +33,21 @@ entry:
 }
 
 define double @testoeq_fast(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: testoeq_fast:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: testoeq_fast:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: testoeq_fast:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    xsnegdp f0, f0
-; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: testoeq_fast:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    xsnegdp f0, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P8-NEXT:    blr
 ;
-; NO-FAST-P8-LABEL: testoeq_fast:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    xsnegdp f0, f0
-; NO-FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT:    blr
+; CHECK-P9-LABEL: testoeq_fast:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    xsnegdp f0, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz oeq double %a, %b
   %cond = select nnan ninf nsz i1 %cmp, double %c, double %d
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index 881d1f4c4093b..b9a7051b1fd4f 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -1,58 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN:   -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN:   --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
-; RUN:   -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN:   --enable-no-nans-fp-math --enable-no-infs-fp-math \
-; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=FAST-P9
 ; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=NO-FAST-P8
+; RUN:   --check-prefix=CHECK-P8
 ; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
 ; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN:   --check-prefix=NO-FAST-P9
+; RUN:   --check-prefix=CHECK-P9
 
 ; Test oeq
 define float @select_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    xsnegdp f1, f0
-; FAST-P8-NEXT:    fsel f0, f0, f3, f4
-; FAST-P8-NEXT:    fsel f1, f1, f0, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_oeq_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    xsnegdp f1, f0
-; FAST-P9-NEXT:    fsel f0, f0, f3, f4
-; FAST-P9-NEXT:    fsel f1, f1, f0, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    beq cr0, .LBB0_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB0_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    beq cr0, .LBB0_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB0_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_oeq_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    beq cr0, .LBB0_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB0_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_oeq_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    beq cr0, .LBB0_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB0_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oeq float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -60,41 +34,25 @@ entry:
 }
 
 define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oeq_float_nsz:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    xssubsp f1, f1, f2
-; FAST-P8-NEXT:    fsel f1, f1, f3, f4
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_oeq_float_nsz:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    xssubsp f1, f1, f2
-; FAST-P9-NEXT:    fsel f1, f1, f3, f4
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_oeq_float_nsz:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    beq cr0, .LBB1_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB1_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_oeq_float_nsz:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    beq cr0, .LBB1_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB1_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_oeq_float_nsz:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    beq cr0, .LBB1_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB1_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_oeq_float_nsz:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    beq cr0, .LBB1_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB1_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nsz oeq float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -102,41 +60,25 @@ entry:
 }
 
 define double @select_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oeq_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_oeq_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_oeq_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT:    beq cr0, .LBB2_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB2_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_oeq_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT:    beq cr0, .LBB2_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB2_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_oeq_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P8-NEXT:    beq cr0, .LBB2_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB2_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_oeq_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P9-NEXT:    beq cr0, .LBB2_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB2_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oeq double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -144,37 +86,21 @@ entry:
 }
 
 define float @select_fast_oeq_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oeq_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    xssubsp f1, f1, f2
-; FAST-P8-NEXT:    fsel f1, f1, f3, f4
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_oeq_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    xssubsp f1, f1, f2
-; FAST-P9-NEXT:    fsel f1, f1, f3, f4
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT:    xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f1, f3, f4
-; NO-FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT:    xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f1, f3, f4
-; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_oeq_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f2, f1
+; CHECK-P8-NEXT:    xssubsp f1, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f1, f3, f4
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_oeq_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f2, f1
+; CHECK-P9-NEXT:    xssubsp f1, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f1, f3, f4
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz oeq float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -182,37 +108,21 @@ entry:
 }
 
 define double @select_fast_oeq_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oeq_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_oeq_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_oeq_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    xsnegdp f0, f0
-; NO-FAST-P8-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_oeq_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    xsnegdp f0, f0
-; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_oeq_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    xsnegdp f0, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_oeq_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    xsnegdp f0, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz oeq double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -222,43 +132,27 @@ entry:
 
 ; Test one
 define float @select_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    xsnegdp f1, f0
-; FAST-P8-NEXT:    fsel f0, f0, f4, f3
-; FAST-P8-NEXT:    fsel f1, f1, f0, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_one_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    xsnegdp f1, f0
-; FAST-P9-NEXT:    fsel f0, f0, f4, f3
-; FAST-P9-NEXT:    fsel f1, f1, f0, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_one_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB5_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_one_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB5_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB5_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_one_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB5_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB5_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_one_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB5_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB5_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp one float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -266,43 +160,27 @@ entry:
 }
 
 define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_one_float_nsz:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    xssubsp f1, f1, f2
-; FAST-P8-NEXT:    fsel f1, f1, f4, f3
-; FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_one_float_nsz:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    xssubsp f1, f1, f2
-; FAST-P9-NEXT:    fsel f1, f1, f4, f3
-; FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_one_float_nsz:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB6_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_one_float_nsz:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB6_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB6_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_one_float_nsz:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB6_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB6_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_one_float_nsz:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB6_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB6_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nsz one float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -310,43 +188,27 @@ entry:
 }
 
 define double @select_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_one_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_one_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_one_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB7_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_one_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB7_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB7_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_one_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB7_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB7_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_one_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, eq
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB7_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB7_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp one double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -354,37 +216,21 @@ entry:
 }
 
 define float @select_fast_one_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_one_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    xssubsp f1, f1, f2
-; FAST-P8-NEXT:    fsel f1, f1, f4, f3
-; FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_one_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    xssubsp f1, f1, f2
-; FAST-P9-NEXT:    fsel f1, f1, f4, f3
-; FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT:    xssubsp f1, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f1, f4, f3
-; NO-FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT:    xssubsp f1, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f1, f4, f3
-; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_one_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f2, f1
+; CHECK-P8-NEXT:    xssubsp f1, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f1, f4, f3
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_one_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f2, f1
+; CHECK-P9-NEXT:    xssubsp f1, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f1, f4, f3
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz one float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -392,37 +238,21 @@ entry:
 }
 
 define double @select_fast_one_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_one_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_one_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_one_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT:    xsnegdp f0, f0
-; NO-FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_one_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT:    xsnegdp f0, f0
-; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_one_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P8-NEXT:    xsnegdp f0, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_one_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P9-NEXT:    xsnegdp f0, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz one double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -432,39 +262,27 @@ entry:
 
 ; Test oge
 define float @select_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_oge_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_oge_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_oge_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB10_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_oge_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB10_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB10_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_oge_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, lt
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB10_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB10_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_oge_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, lt
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB10_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB10_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oge float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -472,39 +290,27 @@ entry:
 }
 
 define double @select_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_oge_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_oge_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_oge_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB11_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_oge_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB11_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB11_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_oge_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, lt
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB11_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB11_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_oge_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, lt
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB11_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB11_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oge double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -512,29 +318,17 @@ entry:
 }
 
 define float @select_fast_oge_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_oge_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_oge_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_oge_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_oge_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz oge float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -542,29 +336,17 @@ entry:
 }
 
 define double @select_fast_oge_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_oge_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_oge_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_oge_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_oge_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_oge_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_oge_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz oge double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -574,37 +356,25 @@ entry:
 
 ; Test olt
 define float @select_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_olt_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_olt_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_olt_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    blt cr0, .LBB14_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB14_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_olt_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    blt cr0, .LBB14_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB14_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_olt_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    blt cr0, .LBB14_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB14_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_olt_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    blt cr0, .LBB14_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB14_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp olt float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -612,37 +382,25 @@ entry:
 }
 
 define double @select_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_olt_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_olt_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_olt_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT:    blt cr0, .LBB15_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB15_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_olt_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT:    blt cr0, .LBB15_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB15_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_olt_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P8-NEXT:    blt cr0, .LBB15_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB15_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_olt_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P9-NEXT:    blt cr0, .LBB15_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB15_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp olt double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -650,29 +408,17 @@ entry:
 }
 
 define float @select_fast_olt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_olt_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_olt_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_olt_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_olt_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ninf nnan nsz olt float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -680,29 +426,17 @@ entry:
 }
 
 define double @select_fast_olt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_olt_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f1, f2
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_olt_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f1, f2
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_olt_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_olt_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f1, f2
-; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_olt_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f1, f2
+; CHECK-P8-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_olt_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f1, f2
+; CHECK-P9-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz olt double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -712,37 +446,25 @@ entry:
 
 ; Test ogt
 define float @select_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ogt_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_ogt_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_ogt_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    bgt cr0, .LBB18_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB18_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_ogt_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    bgt cr0, .LBB18_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB18_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_ogt_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    bgt cr0, .LBB18_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB18_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_ogt_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    bgt cr0, .LBB18_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB18_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ogt float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -750,37 +472,25 @@ entry:
 }
 
 define double @select_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ogt_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_ogt_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_ogt_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT:    bgt cr0, .LBB19_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB19_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_ogt_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT:    bgt cr0, .LBB19_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB19_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_ogt_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P8-NEXT:    bgt cr0, .LBB19_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB19_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_ogt_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f2
+; CHECK-P9-NEXT:    bgt cr0, .LBB19_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB19_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ogt double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -788,29 +498,17 @@ entry:
 }
 
 define float @select_fast_ogt_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ogt_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_ogt_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_ogt_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f2, f1
+; CHECK-P8-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_ogt_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f2, f1
+; CHECK-P9-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz ogt float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -818,29 +516,17 @@ entry:
 }
 
 define double @select_fast_ogt_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ogt_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_ogt_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_ogt_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_ogt_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_ogt_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f2, f1
+; CHECK-P8-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_ogt_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f2, f1
+; CHECK-P9-NEXT:    fsel f1, f0, f4, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz ogt double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -850,39 +536,27 @@ entry:
 
 ; Test ole
 define float @select_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_ole_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_ole_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_ole_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB22_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_ole_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB22_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB22_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_ole_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, gt
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB22_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB22_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_ole_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, gt
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB22_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB22_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ole float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -890,39 +564,27 @@ entry:
 }
 
 define double @select_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_ole_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_ole_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_ole_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT:    crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT:    bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f4
-; NO-FAST-P8-NEXT:  .LBB23_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_ole_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT:    crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT:    bc 12, 4*cr5+lt, .LBB23_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f4
-; NO-FAST-P9-NEXT:  .LBB23_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_ole_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P8-NEXT:    crnor 4*cr5+lt, un, gt
+; CHECK-P8-NEXT:    bc 12, 4*cr5+lt, .LBB23_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f3, f4
+; CHECK-P8-NEXT:  .LBB23_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_ole_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f2
+; CHECK-P9-NEXT:    crnor 4*cr5+lt, un, gt
+; CHECK-P9-NEXT:    bc 12, 4*cr5+lt, .LBB23_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f3, f4
+; CHECK-P9-NEXT:  .LBB23_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ole double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -930,29 +592,17 @@ entry:
 }
 
 define float @select_fast_ole_float(float %a, float %b, float %c, float %d) {
-; FAST-P8-LABEL: select_fast_ole_float:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubsp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_ole_float:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubsp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_float:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_float:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubsp f0, f2, f1
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_ole_float:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubsp f0, f2, f1
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_ole_float:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubsp f0, f2, f1
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz ole float %a, %b
   %cond = select i1 %cmp, float %c, float %d
@@ -960,29 +610,17 @@ entry:
 }
 
 define double @select_fast_ole_double(double %a, double %b, double %c, double %d) {
-; FAST-P8-LABEL: select_fast_ole_double:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    xssubdp f0, f2, f1
-; FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: select_fast_ole_double:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    xssubdp f0, f2, f1
-; FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: select_fast_ole_double:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: select_fast_ole_double:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    xssubdp f0, f2, f1
-; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: select_fast_ole_double:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    xssubdp f0, f2, f1
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: select_fast_ole_double:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    xssubdp f0, f2, f1
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f4
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp nnan ninf nsz ole double %a, %b
   %cond = select i1 %cmp, double %c, double %d
@@ -991,149 +629,167 @@ entry:
 
 ; Test RHS is 1.000000e+00
 define double @onecmp1(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp1:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    vspltisw v2, -1
-; FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT:    xsadddp f0, f1, f0
-; FAST-P8-NEXT:    fsel f1, f0, f2, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: onecmp1:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    vspltisw v2, -1
-; FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT:    xsadddp f0, f1, f0
-; FAST-P9-NEXT:    fsel f1, f0, f2, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: onecmp1:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    vspltisw v2, 1
-; NO-FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f0
-; NO-FAST-P8-NEXT:    bc 12, lt, .LBB26_3
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fcmpu cr0, f1, f1
-; NO-FAST-P8-NEXT:    bc 12, un, .LBB26_3
-; NO-FAST-P8-NEXT:  # %bb.2: # %entry
-; NO-FAST-P8-NEXT:    fmr f3, f2
-; NO-FAST-P8-NEXT:  .LBB26_3: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f3
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: onecmp1:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    vspltisw v2, 1
-; NO-FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f0
-; NO-FAST-P9-NEXT:    bc 12, lt, .LBB26_3
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fcmpu cr0, f1, f1
-; NO-FAST-P9-NEXT:    bc 12, un, .LBB26_3
-; NO-FAST-P9-NEXT:  # %bb.2: # %entry
-; NO-FAST-P9-NEXT:    fmr f3, f2
-; NO-FAST-P9-NEXT:  .LBB26_3: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f3
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: onecmp1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, 1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f0
+; CHECK-P8-NEXT:    bc 12, lt, .LBB26_3
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fcmpu cr0, f1, f1
+; CHECK-P8-NEXT:    bc 12, un, .LBB26_3
+; CHECK-P8-NEXT:  # %bb.2: # %entry
+; CHECK-P8-NEXT:    fmr f3, f2
+; CHECK-P8-NEXT:  .LBB26_3: # %entry
+; CHECK-P8-NEXT:    fmr f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp1:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, 1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f0
+; CHECK-P9-NEXT:    bc 12, lt, .LBB26_3
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fcmpu cr0, f1, f1
+; CHECK-P9-NEXT:    bc 12, un, .LBB26_3
+; CHECK-P9-NEXT:  # %bb.2: # %entry
+; CHECK-P9-NEXT:    fmr f3, f2
+; CHECK-P9-NEXT:  .LBB26_3: # %entry
+; CHECK-P9-NEXT:    fmr f1, f3
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ult double %a, 1.000000e+00
   %z.y = select i1 %cmp, double %z, double %y
   ret double %z.y
 }
 
+define double @onecmp1_fast(double %a, double %y, double %z) {
+; CHECK-P8-LABEL: onecmp1_fast:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, -1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    xsadddp f0, f1, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f2, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp1_fast:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, -1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    xsadddp f0, f1, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f2, f3
+; CHECK-P9-NEXT:    blr
+entry:
+  %cmp = fcmp fast ult double %a, 1.000000e+00
+  %z.y = select fast i1 %cmp, double %z, double %y
+  ret double %z.y
+}
+
 define double @onecmp2(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp2:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    vspltisw v2, 1
-; FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT:    xssubdp f0, f0, f1
-; FAST-P8-NEXT:    fsel f1, f0, f3, f2
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: onecmp2:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    vspltisw v2, 1
-; FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT:    xssubdp f0, f0, f1
-; FAST-P9-NEXT:    fsel f1, f0, f3, f2
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: onecmp2:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    vspltisw v2, 1
-; NO-FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT:    bgt cr0, .LBB27_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f2, f3
-; NO-FAST-P8-NEXT:  .LBB27_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f2
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: onecmp2:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    vspltisw v2, 1
-; NO-FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT:    bgt cr0, .LBB27_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f2, f3
-; NO-FAST-P9-NEXT:  .LBB27_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f2
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: onecmp2:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, 1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f0
+; CHECK-P8-NEXT:    bgt cr0, .LBB28_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f2, f3
+; CHECK-P8-NEXT:  .LBB28_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f2
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp2:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, 1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f0
+; CHECK-P9-NEXT:    bgt cr0, .LBB28_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f2, f3
+; CHECK-P9-NEXT:  .LBB28_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f2
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp ogt double %a, 1.000000e+00
   %y.z = select i1 %cmp, double %y, double %z
   ret double %y.z
 }
 
+define double @onecmp2_fast(double %a, double %y, double %z) {
+; CHECK-P8-LABEL: onecmp2_fast:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, 1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    xssubdp f0, f0, f1
+; CHECK-P8-NEXT:    fsel f1, f0, f3, f2
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp2_fast:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, 1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    xssubdp f0, f0, f1
+; CHECK-P9-NEXT:    fsel f1, f0, f3, f2
+; CHECK-P9-NEXT:    blr
+entry:
+  %cmp = fcmp fast ogt double %a, 1.000000e+00
+  %y.z = select fast i1 %cmp, double %y, double %z
+  ret double %y.z
+}
+
 define double @onecmp3(double %a, double %y, double %z) {
-; FAST-P8-LABEL: onecmp3:
-; FAST-P8:       # %bb.0: # %entry
-; FAST-P8-NEXT:    vspltisw v2, -1
-; FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P8-NEXT:    xsadddp f0, f1, f0
-; FAST-P8-NEXT:    fsel f1, f0, f2, f3
-; FAST-P8-NEXT:    xsnegdp f0, f0
-; FAST-P8-NEXT:    fsel f1, f0, f1, f3
-; FAST-P8-NEXT:    blr
-;
-; FAST-P9-LABEL: onecmp3:
-; FAST-P9:       # %bb.0: # %entry
-; FAST-P9-NEXT:    vspltisw v2, -1
-; FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; FAST-P9-NEXT:    xsadddp f0, f1, f0
-; FAST-P9-NEXT:    fsel f1, f0, f2, f3
-; FAST-P9-NEXT:    xsnegdp f0, f0
-; FAST-P9-NEXT:    fsel f1, f0, f1, f3
-; FAST-P9-NEXT:    blr
-;
-; NO-FAST-P8-LABEL: onecmp3:
-; NO-FAST-P8:       # %bb.0: # %entry
-; NO-FAST-P8-NEXT:    vspltisw v2, 1
-; NO-FAST-P8-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P8-NEXT:    xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT:    beq cr0, .LBB28_2
-; NO-FAST-P8-NEXT:  # %bb.1: # %entry
-; NO-FAST-P8-NEXT:    fmr f2, f3
-; NO-FAST-P8-NEXT:  .LBB28_2: # %entry
-; NO-FAST-P8-NEXT:    fmr f1, f2
-; NO-FAST-P8-NEXT:    blr
-;
-; NO-FAST-P9-LABEL: onecmp3:
-; NO-FAST-P9:       # %bb.0: # %entry
-; NO-FAST-P9-NEXT:    vspltisw v2, 1
-; NO-FAST-P9-NEXT:    xvcvsxwdp vs0, vs34
-; NO-FAST-P9-NEXT:    xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT:    beq cr0, .LBB28_2
-; NO-FAST-P9-NEXT:  # %bb.1: # %entry
-; NO-FAST-P9-NEXT:    fmr f2, f3
-; NO-FAST-P9-NEXT:  .LBB28_2: # %entry
-; NO-FAST-P9-NEXT:    fmr f1, f2
-; NO-FAST-P9-NEXT:    blr
+; CHECK-P8-LABEL: onecmp3:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, 1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    xscmpudp cr0, f1, f0
+; CHECK-P8-NEXT:    beq cr0, .LBB30_2
+; CHECK-P8-NEXT:  # %bb.1: # %entry
+; CHECK-P8-NEXT:    fmr f2, f3
+; CHECK-P8-NEXT:  .LBB30_2: # %entry
+; CHECK-P8-NEXT:    fmr f1, f2
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp3:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, 1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    xscmpudp cr0, f1, f0
+; CHECK-P9-NEXT:    beq cr0, .LBB30_2
+; CHECK-P9-NEXT:  # %bb.1: # %entry
+; CHECK-P9-NEXT:    fmr f2, f3
+; CHECK-P9-NEXT:  .LBB30_2: # %entry
+; CHECK-P9-NEXT:    fmr f1, f2
+; CHECK-P9-NEXT:    blr
 entry:
   %cmp = fcmp oeq double %a, 1.000000e+00
   %y.z = select i1 %cmp, double %y, double %z
   ret double %y.z
 }
+
+define double @onecmp3_fast(double %a, double %y, double %z) {
+; CHECK-P8-LABEL: onecmp3_fast:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v2, -1
+; CHECK-P8-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P8-NEXT:    xsadddp f0, f1, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f2, f3
+; CHECK-P8-NEXT:    xsnegdp f0, f0
+; CHECK-P8-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P9-LABEL: onecmp3_fast:
+; CHECK-P9:       # %bb.0: # %entry
+; CHECK-P9-NEXT:    vspltisw v2, -1
+; CHECK-P9-NEXT:    xvcvsxwdp vs0, vs34
+; CHECK-P9-NEXT:    xsadddp f0, f1, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f2, f3
+; CHECK-P9-NEXT:    xsnegdp f0, f0
+; CHECK-P9-NEXT:    fsel f1, f0, f1, f3
+; CHECK-P9-NEXT:    blr
+entry:
+  %cmp = fcmp fast oeq double %a, 1.000000e+00
+  %y.z = select fast i1 %cmp, double %y, double %z
+  ret double %y.z
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
index ff923efe8eb43..658145698d29c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_oeq_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -67,11 +67,11 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp oeq <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan oeq <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_oeq_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -82,7 +82,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oeq <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan oeq <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -129,7 +129,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ogt_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -138,11 +138,11 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ogt <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ogt <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ogt_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -153,7 +153,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ogt <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ogt <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -200,7 +200,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_oge_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -209,11 +209,11 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp oge <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan oge <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_oge_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -224,7 +224,7 @@ define <vscale x 8 x i1> @fcmp_oge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oge <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan oge <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -271,7 +271,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_olt_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -280,11 +280,11 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp olt <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan olt <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_olt_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -295,7 +295,7 @@ define <vscale x 8 x i1> @fcmp_olt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp olt <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan olt <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -342,7 +342,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ole_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -351,11 +351,11 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ole <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ole <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ole_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -366,7 +366,7 @@ define <vscale x 8 x i1> @fcmp_ole_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ole <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ole <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -419,7 +419,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_one_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -428,11 +428,11 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp one <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan one <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_one_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -443,7 +443,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp one <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan one <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -502,7 +502,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ord_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -515,11 +515,11 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vmfeq.vv v8, v12, v12
 ; CHECK-NEXT:    vmand.mm v0, v8, v10
 ; CHECK-NEXT:    ret
-  %vc = fcmp ord <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ord <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ord_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -534,7 +534,7 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ord <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ord <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -587,7 +587,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ueq_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -596,11 +596,11 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ueq <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ueq <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ueq_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -611,7 +611,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ueq <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ueq <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -661,7 +661,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ugt_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -670,11 +670,11 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ugt <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ugt <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ugt_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -685,7 +685,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ugt <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ugt <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -735,7 +735,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_uge_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -744,11 +744,11 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp uge <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan uge <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_uge_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -759,7 +759,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uge <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan uge <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -809,7 +809,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ult_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -818,11 +818,11 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ult <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ult <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ult_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -833,7 +833,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ult <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ult <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -883,7 +883,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_ule_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -892,11 +892,11 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ule <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan ule <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_ule_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -907,7 +907,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ule <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan ule <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -954,7 +954,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_une_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -963,11 +963,11 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v16, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp une <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan une <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_une_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -978,7 +978,7 @@ define <vscale x 8 x i1> @fcmp_une_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp une <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan une <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1037,7 +1037,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
 ; CHECK-LABEL: fcmp_uno_vv_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1050,11 +1050,11 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    vmfne.vv v8, v12, v12
 ; CHECK-NEXT:    vmor.mm v0, v8, v10
 ; CHECK-NEXT:    ret
-  %vc = fcmp uno <vscale x 8 x bfloat> %va, %vb
+  %vc = fcmp nnan uno <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va, bfloat %b) {
 ; CHECK-LABEL: fcmp_uno_vf_nxv8bf16_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
@@ -1069,7 +1069,7 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8bf16_nonans(<vscale x 8 x bfloat> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uno <vscale x 8 x bfloat> %va, %splat
+  %vc = fcmp nnan uno <vscale x 8 x bfloat> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1134,7 +1134,7 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_oeq_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1149,11 +1149,11 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfeq.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp oeq <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan oeq <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_oeq_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1170,7 +1170,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oeq <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan oeq <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1235,7 +1235,7 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ogt_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1250,11 +1250,11 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmflt.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ogt <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ogt <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ogt_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1271,7 +1271,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ogt <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ogt <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1336,7 +1336,7 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_oge_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1351,11 +1351,11 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfle.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp oge <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan oge <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_oge_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1372,7 +1372,7 @@ define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oge <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan oge <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1437,7 +1437,7 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_olt_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1452,11 +1452,11 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmflt.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp olt <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan olt <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_olt_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1473,7 +1473,7 @@ define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp olt <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan olt <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1538,7 +1538,7 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ole_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1553,11 +1553,11 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfle.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ole <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ole <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ole_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1574,7 +1574,7 @@ define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ole <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ole <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1651,7 +1651,7 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_one_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1666,11 +1666,11 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfne.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp one <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan one <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_one_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1687,7 +1687,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp one <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan one <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1772,7 +1772,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ord_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1793,11 +1793,11 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v12
 ; ZVFHMIN-NEXT:    vmand.mm v0, v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ord <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ord <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ord_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1821,7 +1821,7 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ord <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ord <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1898,7 +1898,7 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ueq_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1913,11 +1913,11 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfeq.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ueq <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ueq <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ueq_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1934,7 +1934,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ueq <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ueq <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2005,7 +2005,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ugt_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2020,11 +2020,11 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmflt.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ugt <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ugt <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ugt_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2041,7 +2041,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ugt <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ugt <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2112,7 +2112,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_uge_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2127,11 +2127,11 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfle.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp uge <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan uge <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_uge_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2148,7 +2148,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uge <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan uge <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2219,7 +2219,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ult_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2234,11 +2234,11 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmflt.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ult <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ult <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ult_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2255,7 +2255,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ult <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ult <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2326,7 +2326,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_ule_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2341,11 +2341,11 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfle.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp ule <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan ule <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_ule_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2362,7 +2362,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ule <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan ule <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2427,7 +2427,7 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_une_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2442,11 +2442,11 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmfne.vv v0, v16, v12
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp une <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan une <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_une_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2463,7 +2463,7 @@ define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp une <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan une <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2548,7 +2548,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; ZVFH-LABEL: fcmp_uno_vv_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2569,11 +2569,11 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16_nonans(<vscale x 8 x half> %va, <v
 ; ZVFHMIN-NEXT:    vmfne.vv v8, v12, v12
 ; ZVFHMIN-NEXT:    vmor.mm v0, v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %vc = fcmp uno <vscale x 8 x half> %va, %vb
+  %vc = fcmp nnan uno <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, half %b) {
 ; ZVFH-LABEL: fcmp_uno_vf_nxv8f16_nonans:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -2597,7 +2597,7 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16_nonans(<vscale x 8 x half> %va, ha
 ; ZVFHMIN-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uno <vscale x 8 x half> %va, %splat
+  %vc = fcmp nnan uno <vscale x 8 x half> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2635,17 +2635,17 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp oeq <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan oeq <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2653,7 +2653,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oeq <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan oeq <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2691,17 +2691,17 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp ogt <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ogt <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2709,7 +2709,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ogt <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ogt <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2747,17 +2747,17 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp oge <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan oge <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2765,7 +2765,7 @@ define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oge <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan oge <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2803,17 +2803,17 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp olt <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan olt <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2821,7 +2821,7 @@ define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp olt <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan olt <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2859,17 +2859,17 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ole <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ole <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2877,7 +2877,7 @@ define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ole <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ole <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2921,17 +2921,17 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp one <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan one <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2939,7 +2939,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp one <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan one <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2985,7 +2985,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -2993,11 +2993,11 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vmfeq.vv v12, v8, v8
 ; CHECK-NEXT:    vmand.mm v0, v12, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp ord <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ord <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3008,7 +3008,7 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ord <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ord <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3052,17 +3052,17 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ueq <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ueq <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3070,7 +3070,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ueq <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ueq <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3111,17 +3111,17 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp ugt <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ugt <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3129,7 +3129,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ugt <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ugt <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3170,17 +3170,17 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp uge <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan uge <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3188,7 +3188,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uge <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan uge <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3229,17 +3229,17 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ult <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ult <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3247,7 +3247,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ult <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ult <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3288,17 +3288,17 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp ule <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan ule <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3306,7 +3306,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ule <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan ule <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3344,17 +3344,17 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
 ; CHECK-NEXT:    ret
-  %vc = fcmp une <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan une <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3362,7 +3362,7 @@ define <vscale x 8 x i1> @fcmp_une_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp une <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan une <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3408,7 +3408,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
 ; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3416,11 +3416,11 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f32_nonans(<vscale x 8 x float> %va, <
 ; CHECK-NEXT:    vmfne.vv v12, v8, v8
 ; CHECK-NEXT:    vmor.mm v0, v12, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp uno <vscale x 8 x float> %va, %vb
+  %vc = fcmp nnan uno <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, float %b) {
 ; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -3431,7 +3431,7 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f32_nonans(<vscale x 8 x float> %va, f
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uno <vscale x 8 x float> %va, %splat
+  %vc = fcmp nnan uno <vscale x 8 x float> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3469,17 +3469,17 @@ define <vscale x 8 x i1> @fcmp_oeq_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp oeq <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan oeq <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3487,7 +3487,7 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oeq <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan oeq <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3525,17 +3525,17 @@ define <vscale x 8 x i1> @fcmp_ogt_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp ogt <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ogt <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3543,7 +3543,7 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ogt <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ogt <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3581,17 +3581,17 @@ define <vscale x 8 x i1> @fcmp_oge_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp oge <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan oge <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3599,7 +3599,7 @@ define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp oge <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan oge <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3637,17 +3637,17 @@ define <vscale x 8 x i1> @fcmp_olt_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp olt <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan olt <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3655,7 +3655,7 @@ define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp olt <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan olt <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3693,17 +3693,17 @@ define <vscale x 8 x i1> @fcmp_ole_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp ole <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ole <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3711,7 +3711,7 @@ define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ole <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ole <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3755,17 +3755,17 @@ define <vscale x 8 x i1> @fcmp_one_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_one_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp one <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan one <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3773,7 +3773,7 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp one <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan one <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3819,7 +3819,7 @@ define <vscale x 8 x i1> @fcmp_ord_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3827,11 +3827,11 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vmfeq.vv v16, v8, v8
 ; CHECK-NEXT:    vmand.mm v0, v16, v24
 ; CHECK-NEXT:    ret
-  %vc = fcmp ord <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ord <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3842,7 +3842,7 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ord <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ord <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3886,17 +3886,17 @@ define <vscale x 8 x i1> @fcmp_ueq_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp ueq <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ueq <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3904,7 +3904,7 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ueq <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ueq <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3945,17 +3945,17 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp ugt <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ugt <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -3963,7 +3963,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ugt <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ugt <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4004,17 +4004,17 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %vc = fcmp uge <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan uge <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4022,7 +4022,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uge <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan uge <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4063,17 +4063,17 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp ult <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ult <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4081,7 +4081,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ult <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ult <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4122,17 +4122,17 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp ule <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan ule <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4140,7 +4140,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp ule <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan ule <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4178,17 +4178,17 @@ define <vscale x 8 x i1> @fcmp_une_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_une_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfne.vv v0, v8, v16
 ; CHECK-NEXT:    ret
-  %vc = fcmp une <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan une <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4196,7 +4196,7 @@ define <vscale x 8 x i1> @fcmp_une_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp une <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan une <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4242,7 +4242,7 @@ define <vscale x 8 x i1> @fcmp_uno_fv_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
 ; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4250,11 +4250,11 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    vmfne.vv v16, v8, v8
 ; CHECK-NEXT:    vmor.mm v0, v16, v24
 ; CHECK-NEXT:    ret
-  %vc = fcmp uno <vscale x 8 x double> %va, %vb
+  %vc = fcmp nnan uno <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
 }
 
-define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) #0 {
+define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va, double %b) {
 ; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -4265,7 +4265,7 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64_nonans(<vscale x 8 x double> %va,
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = fcmp uno <vscale x 8 x double> %va, %splat
+  %vc = fcmp nnan uno <vscale x 8 x double> %va, %splat
   ret <vscale x 8 x i1> %vc
 }
 
@@ -4322,5 +4322,3 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
   %vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
   ret <vscale x 16 x i1> %vc
 }
-
-attributes #0 = { "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index 940fe8cf6ba75..61f79d3572ddf 100644
--- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,13 +1,17 @@
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s
 
-; WITHNANS-LABEL: test:
-; WITHNANS: setnp
-; NONANS-LABEL: test:
-; NONANS-NOT: setnp
+; CHECK-LABEL: test:
+; CHECK: setnp
 define i32 @test(float %f) {
 	%tmp = fcmp oeq float %f, 0.000000e+00		; <i1> [#uses=1]
 	%tmp.upgrd.1 = zext i1 %tmp to i32		; <i32> [#uses=1]
 	ret i32 %tmp.upgrd.1
 }
 
+; CHECK-LABEL: test_nnan:
+; CHECK-NOT: setnp
+define i32 @test_nnan(float %f) {
+	%tmp = fcmp nnan oeq float %f, 0.000000e+00		; <i1> [#uses=1]
+	%tmp.upgrd.1 = zext i1 %tmp to i32		; <i32> [#uses=1]
+	ret i32 %tmp.upgrd.1
+}
diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll
index 8e4b6c6af4cb1..27864a9eefa8e 100644
--- a/llvm/test/CodeGen/X86/avx-minmax.ll
+++ b/llvm/test/CodeGen/X86/avx-minmax.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s
 
 define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
 ; CHECK-LABEL: maxpd:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %max_is_x = fcmp oge <2 x double> %x, %y
+  %max_is_x = fcmp nnan oge <2 x double> %x, %y
   %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %max
 }
@@ -16,7 +16,7 @@ define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %min_is_x = fcmp ole <2 x double> %x, %y
+  %min_is_x = fcmp nnan ole <2 x double> %x, %y
   %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %min
 }
@@ -26,7 +26,7 @@ define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %max_is_x = fcmp oge <4 x float> %x, %y
+  %max_is_x = fcmp nnan oge <4 x float> %x, %y
   %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %max
 }
@@ -36,7 +36,7 @@ define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminps %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
-  %min_is_x = fcmp ole <4 x float> %x, %y
+  %min_is_x = fcmp nnan ole <4 x float> %x, %y
   %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %min
 }
@@ -46,7 +46,7 @@ define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %max_is_x = fcmp oge <4 x double> %x, %y
+  %max_is_x = fcmp nnan oge <4 x double> %x, %y
   %max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y
   ret <4 x double> %max
 }
@@ -56,7 +56,7 @@ define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminpd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %min_is_x = fcmp ole <4 x double> %x, %y
+  %min_is_x = fcmp nnan ole <4 x double> %x, %y
   %min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y
   ret <4 x double> %min
 }
@@ -66,7 +66,7 @@ define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %max_is_x = fcmp oge <8 x float> %x, %y
+  %max_is_x = fcmp nnan oge <8 x float> %x, %y
   %max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y
   ret <8 x float> %max
 }
@@ -76,7 +76,7 @@ define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminps %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
-  %min_is_x = fcmp ole <8 x float> %x, %y
+  %min_is_x = fcmp nnan ole <8 x float> %x, %y
   %min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y
   ret <8 x float> %min
 }
diff --git a/llvm/test/CodeGen/X86/sse-minmax-fast.ll b/llvm/test/CodeGen/X86/sse-minmax-fast.ll
new file mode 100644
index 0000000000000..9ccd20d88c0b9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-fast.ll
@@ -0,0 +1,735 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; TODO: Drop -enable-no-signed-zeros-fp-math
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-signed-zeros-fp-math | FileCheck %s
+
+; Some of these patterns can be matched as SSE min or max. Some of
+; them can be matched provided that the operands are swapped.
+; Some of them can't be matched at all and require a comparison
+; and a conditional branch.
+
+; The naming convention is {o,u}{gt,lt,ge,le}{,_x,_y}{,_inverse}
+;  _x: use 0.0 instead of %y
+;  _y: use -0.0 instead of %y
+; _inverse: swap the arms of the select.
+
+define double @ogt(double %x, double %y)  {
+; CHECK-LABEL: ogt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @olt(double %x, double %y)  {
+; CHECK-LABEL: olt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y) "no-nans-fp-math"="true" {
+; CHECK-LABEL: ogt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @olt_inverse(double %x, double %y)  {
+; CHECK-LABEL: olt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @oge(double %x, double %y)  {
+; CHECK-LABEL: oge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ole(double %x, double %y)  {
+; CHECK-LABEL: ole:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @oge_inverse(double %x, double %y)  {
+; CHECK-LABEL: oge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ole_inverse(double %x, double %y)  {
+; CHECK-LABEL: ole_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ogt_x(double %x)  {
+; CHECK-LABEL: ogt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @olt_x(double %x)  {
+; CHECK-LABEL: olt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ogt_inverse_x(double %x)  {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @olt_inverse_x(double %x)  {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @oge_x(double %x)  {
+; CHECK-LABEL: oge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ole_x(double %x)  {
+; CHECK-LABEL: ole_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @oge_inverse_x(double %x)  {
+; CHECK-LABEL: oge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ole_inverse_x(double %x)  {
+; CHECK-LABEL: ole_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ugt(double %x, double %y)  {
+; CHECK-LABEL: ugt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ult(double %x, double %y)  {
+; CHECK-LABEL: ult:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y)  {
+; CHECK-LABEL: ugt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ult_inverse(double %x, double %y)  {
+; CHECK-LABEL: ult_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @uge(double %x, double %y)  {
+; CHECK-LABEL: uge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ule(double %x, double %y)  {
+; CHECK-LABEL: ule:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, %y
+  %d = select i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @uge_inverse(double %x, double %y)  {
+; CHECK-LABEL: uge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ule_inverse(double %x, double %y)  {
+; CHECK-LABEL: ule_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, %y
+  %d = select i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ugt_x(double %x)  {
+; CHECK-LABEL: ugt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ult_x(double %x)  {
+; CHECK-LABEL: ult_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ugt_inverse_x(double %x)  {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ult_inverse_x(double %x)  {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @uge_x(double %x)  {
+; CHECK-LABEL: uge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ule_x(double %x)  {
+; CHECK-LABEL: ule_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, 0.000000e+00
+  %d = select i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @uge_inverse_x(double %x)  {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ule_inverse_x(double %x)  {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, 0.000000e+00
+  %d = select i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ogt_y(double %x)  {
+; CHECK-LABEL: ogt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @olt_y(double %x)  {
+; CHECK-LABEL: olt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ogt_inverse_y(double %x)  {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ogt double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @olt_inverse_y(double %x)  {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz olt double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @oge_y(double %x)  {
+; CHECK-LABEL: oge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ole_y(double %x)  {
+; CHECK-LABEL: ole_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @oge_inverse_y(double %x)  {
+; CHECK-LABEL: oge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz oge double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ole_inverse_y(double %x)  {
+; CHECK-LABEL: ole_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ole double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ugt_y(double %x)  {
+; CHECK-LABEL: ugt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ult_y(double %x)  {
+; CHECK-LABEL: ult_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ugt_inverse_y(double %x)  {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ugt double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ult_inverse_y(double %x)  {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ult double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @uge_y(double %x)  {
+; CHECK-LABEL: uge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ule_y(double %x)  {
+; CHECK-LABEL: ule_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, -0.000000e+00
+  %d = select i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @uge_inverse_y(double %x)  {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz uge double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ule_inverse_y(double %x)  {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan nsz ule double %x, -0.000000e+00
+  %d = select i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x)  {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz ogt double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_b(double %x)  {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz uge double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_c(double %x)  {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz olt double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_d(double %x)  {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz ule double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_e(double %x)  {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz olt double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_f(double %x)  {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz ule double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_g(double %x)  {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz ogt double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_h(double %x)  {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan nsz uge double %x, 3.000000e+03
+  %y = select i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y)  {
+; CHECK-LABEL: test_maxpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan nsz oge <2 x double> %x, %y
+  %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+  ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y)  {
+; CHECK-LABEL: test_minpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan nsz ole <2 x double> %x, %y
+  %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+  ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y)  {
+; CHECK-LABEL: test_maxps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan nsz oge <4 x float> %x, %y
+  %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+  ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y)  {
+; CHECK-LABEL: test_minps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan nsz ole <4 x float> %x, %y
+  %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+  ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan nsz oge <2 x float> %x, %y
+  %max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+  ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan nsz ole <2 x float> %x, %y
+  %min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+  ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan nsz oge <3 x float> %x, %y
+  %max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+  ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan nsz ole <3 x float> %x, %y
+  %min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+  ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT:    retq
+bb:
+  %cmp2 = fcmp ninf nnan nsz olt float %x, 2.550000e+02
+  %B1 = urem i1 %cmp2, %cmp2
+  %min = select i1 %B1, float %x, float 2.550000e+02
+  %B = frem float %min, 0x47EFFFFFE0000000
+  %cmp1 = fcmp ninf nnan nsz olt float %B, 1.000000e+00
+  %r = select i1 %cmp1, float 1.000000e+00, float %min
+  ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax-finite.ll b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
new file mode 100644
index 0000000000000..4b762b996ca8b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-minmax-finite.ll
@@ -0,0 +1,735 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
+
+; Some of these patterns can be matched as SSE min or max. Some of
+; them can be matched provided that the operands are swapped.
+; Some of them can't be matched at all and require a comparison
+; and a conditional branch.
+
+; The naming convention is {o,u}{gt,lt,ge,le}{,_x,_y}{,_inverse}
+;  _x: use 0.0 instead of %y
+;  _y: use -0.0 instead of %y
+; _inverse: swap the arms of the select.
+
+define double @ogt(double %x, double %y)  {
+; CHECK-LABEL: ogt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @olt(double %x, double %y)  {
+; CHECK-LABEL: olt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ogt_inverse(double %x, double %y)  {
+; CHECK-LABEL: ogt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @olt_inverse(double %x, double %y)  {
+; CHECK-LABEL: olt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @oge(double %x, double %y)  {
+; CHECK-LABEL: oge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ole(double %x, double %y)  {
+; CHECK-LABEL: ole:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @oge_inverse(double %x, double %y)  {
+; CHECK-LABEL: oge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ole_inverse(double %x, double %y)  {
+; CHECK-LABEL: ole_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ogt_x(double %x)  {
+; CHECK-LABEL: ogt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @olt_x(double %x)  {
+; CHECK-LABEL: olt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ogt_inverse_x(double %x)  {
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @olt_inverse_x(double %x)  {
+; CHECK-LABEL: olt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @oge_x(double %x)  {
+; CHECK-LABEL: oge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ole_x(double %x)  {
+; CHECK-LABEL: ole_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @oge_inverse_x(double %x)  {
+; CHECK-LABEL: oge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ole_inverse_x(double %x)  {
+; CHECK-LABEL: ole_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ugt(double %x, double %y)  {
+; CHECK-LABEL: ugt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ult(double %x, double %y)  {
+; CHECK-LABEL: ult:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ugt_inverse(double %x, double %y)  {
+; CHECK-LABEL: ugt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ult_inverse(double %x, double %y)  {
+; CHECK-LABEL: ult_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @uge(double %x, double %y)  {
+; CHECK-LABEL: uge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @ule(double %x, double %y)  {
+; CHECK-LABEL: ule:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %x, double %y
+  ret double %d
+}
+
+define double @uge_inverse(double %x, double %y)  {
+; CHECK-LABEL: uge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ule_inverse(double %x, double %y)  {
+; CHECK-LABEL: ule_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, %y
+  %d = select ninf nnan nsz i1 %c, double %y, double %x
+  ret double %d
+}
+
+define double @ugt_x(double %x)  {
+; CHECK-LABEL: ugt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ult_x(double %x)  {
+; CHECK-LABEL: ult_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ugt_inverse_x(double %x)  {
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ult_inverse_x(double %x)  {
+; CHECK-LABEL: ult_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @uge_x(double %x)  {
+; CHECK-LABEL: uge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @ule_x(double %x)  {
+; CHECK-LABEL: ule_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double 0.000000e+00
+  ret double %d
+}
+
+define double @uge_inverse_x(double %x)  {
+; CHECK-LABEL: uge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ule_inverse_x(double %x)  {
+; CHECK-LABEL: ule_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, 0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double 0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ogt_y(double %x)  {
+; CHECK-LABEL: ogt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @olt_y(double %x)  {
+; CHECK-LABEL: olt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ogt_inverse_y(double %x)  {
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ogt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @olt_inverse_y(double %x)  {
+; CHECK-LABEL: olt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan olt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @oge_y(double %x)  {
+; CHECK-LABEL: oge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ole_y(double %x)  {
+; CHECK-LABEL: ole_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @oge_inverse_y(double %x)  {
+; CHECK-LABEL: oge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan oge double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ole_inverse_y(double %x)  {
+; CHECK-LABEL: ole_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ole double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ugt_y(double %x)  {
+; CHECK-LABEL: ugt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ult_y(double %x)  {
+; CHECK-LABEL: ult_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ugt_inverse_y(double %x)  {
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ugt double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ult_inverse_y(double %x)  {
+; CHECK-LABEL: ult_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ult double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @uge_y(double %x)  {
+; CHECK-LABEL: uge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @ule_y(double %x)  {
+; CHECK-LABEL: ule_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double %x, double -0.000000e+00
+  ret double %d
+}
+
+define double @uge_inverse_y(double %x)  {
+; CHECK-LABEL: uge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan uge double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+define double @ule_inverse_y(double %x)  {
+; CHECK-LABEL: ule_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %c = fcmp ninf nnan ule double %x, -0.000000e+00
+  %d = select ninf nnan nsz i1 %c, double -0.000000e+00, double %x
+  ret double %d
+}
+
+; Test a few more misc. cases.
+
+define double @clampTo3k_a(double %x)  {
+; CHECK-LABEL: clampTo3k_a:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan ogt double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_b(double %x)  {
+; CHECK-LABEL: clampTo3k_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan uge double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_c(double %x)  {
+; CHECK-LABEL: clampTo3k_c:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan olt double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_d(double %x)  {
+; CHECK-LABEL: clampTo3k_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan ule double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_e(double %x)  {
+; CHECK-LABEL: clampTo3k_e:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan olt double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_f(double %x)  {
+; CHECK-LABEL: clampTo3k_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan ule double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_g(double %x)  {
+; CHECK-LABEL: clampTo3k_g:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan ogt double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define double @clampTo3k_h(double %x)  {
+; CHECK-LABEL: clampTo3k_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %t0 = fcmp ninf nnan uge double %x, 3.000000e+03
+  %y = select ninf nnan nsz i1 %t0, double 3.000000e+03, double %x
+  ret double %y
+}
+
+define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y)  {
+; CHECK-LABEL: test_maxpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan oge <2 x double> %x, %y
+  %max = select ninf nnan nsz <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
+  ret <2 x double> %max
+}
+
+define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y)  {
+; CHECK-LABEL: test_minpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan ole <2 x double> %x, %y
+  %min = select ninf nnan nsz <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
+  ret <2 x double> %min
+}
+
+define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y)  {
+; CHECK-LABEL: test_maxps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan oge <4 x float> %x, %y
+  %max = select ninf nnan nsz <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
+  ret <4 x float> %max
+}
+
+define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y)  {
+; CHECK-LABEL: test_minps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan ole <4 x float> %x, %y
+  %min = select ninf nnan nsz <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
+  ret <4 x float> %min
+}
+
+define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan oge <2 x float> %x, %y
+  %max = select ninf nnan nsz <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
+  ret <2 x float> %max
+}
+
+define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan ole <2 x float> %x, %y
+  %min = select ninf nnan nsz <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
+  ret <2 x float> %min
+}
+
+define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %max_is_x = fcmp ninf nnan oge <3 x float> %x, %y
+  %max = select ninf nnan nsz <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
+  ret <3 x float> %max
+}
+
+define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minps %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %min_is_x = fcmp ninf nnan ole <3 x float> %x, %y
+  %min = select ninf nnan nsz <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
+  ret <3 x float> %min
+}
+
+; OSS-Fuzz #13838
+; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
+define float @ossfuzz13838(float %x) {
+; CHECK-LABEL: ossfuzz13838:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT:    retq
+bb:
+  %cmp2 = fcmp ninf nnan fast olt float %x, 2.550000e+02
+  %B1 = urem i1 %cmp2, %cmp2
+  %min = select ninf nnan nsz i1 %B1, float %x, float 2.550000e+02
+  %B = frem float %min, 0x47EFFFFFE0000000
+  %cmp1 = fcmp ninf nnan fast olt float %B, 1.000000e+00
+  %r = select ninf nnan nsz i1 %cmp1, float 1.000000e+00, float %min
+  ret float %r
+}
diff --git a/llvm/test/CodeGen/X86/sse-minmax.ll b/llvm/test/CodeGen/X86/sse-minmax.ll
index 7904b21a3b1fa..8d07ff57ba638 100644
--- a/llvm/test/CodeGen/X86/sse-minmax.ll
+++ b/llvm/test/CodeGen/X86/sse-minmax.ll
@@ -1,7 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2                                                          | FileCheck %s --check-prefix=ALL --check-prefix=STRICT
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-signed-zeros-fp-math -enable-no-nans-fp-math  | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=UNSAFE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 -enable-no-nans-fp-math                                  | FileCheck %s --check-prefix=ALL --check-prefix=RELAX --check-prefix=FINITE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s
 
 ; Some of these patterns can be matched as SSE min or max. Some of
 ; them can be matched provided that the operands are swapped.
@@ -14,972 +12,588 @@
 ; _inverse : swap the arms of the select.
 
 define double @ogt(double %x, double %y)  {
-; ALL-LABEL: ogt:
-; ALL:       # %bb.0:
-; ALL-NEXT:    maxsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: ogt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @olt(double %x, double %y)  {
-; ALL-LABEL: olt:
-; ALL:       # %bb.0:
-; ALL-NEXT:    minsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: olt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @ogt_inverse(double %x, double %y)  {
-; STRICT-LABEL: ogt_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ogt_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ogt_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ogt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @olt_inverse(double %x, double %y)  {
-; STRICT-LABEL: olt_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: olt_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: olt_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: olt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @oge(double %x, double %y)  {
-; STRICT-LABEL: oge:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    cmplesd %xmm2, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: oge:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: oge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    cmplesd %xmm2, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @ole(double %x, double %y)  {
-; STRICT-LABEL: ole:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmplesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ole:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ole:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmplesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @oge_inverse(double %x, double %y)  {
-; STRICT-LABEL: oge_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    cmplesd %xmm2, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: oge_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: oge_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: oge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    cmplesd %xmm2, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @ole_inverse(double %x, double %y)  {
-; STRICT-LABEL: ole_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmplesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ole_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ole_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ole_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmplesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @ogt_x(double %x)  {
-; ALL-LABEL: ogt_x:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorpd %xmm1, %xmm1
-; ALL-NEXT:    maxsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: ogt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @olt_x(double %x)  {
-; ALL-LABEL: olt_x:
-; ALL:       # %bb.0:
-; ALL-NEXT:    xorpd %xmm1, %xmm1
-; ALL-NEXT:    minsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: olt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @ogt_inverse_x(double %x)  {
-; STRICT-LABEL: ogt_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ogt_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ogt_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ogt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @olt_inverse_x(double %x)  {
-; STRICT-LABEL: olt_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: olt_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: olt_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: olt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @oge_x(double %x)  {
-; STRICT-LABEL: oge_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    cmplesd %xmm0, %xmm1
-; STRICT-NEXT:    andpd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: oge_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: oge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    cmplesd %xmm0, %xmm1
+; CHECK-NEXT:    andpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @ole_x(double %x)  {
-; STRICT-LABEL: ole_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmplesd %xmm1, %xmm2
-; STRICT-NEXT:    andpd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ole_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ole_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmplesd %xmm1, %xmm2
+; CHECK-NEXT:    andpd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @oge_inverse_x(double %x)  {
-; STRICT-LABEL: oge_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    cmplesd %xmm0, %xmm1
-; STRICT-NEXT:    andnpd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: oge_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: oge_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: oge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    cmplesd %xmm0, %xmm1
+; CHECK-NEXT:    andnpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @ole_inverse_x(double %x)  {
-; STRICT-LABEL: ole_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm2, %xmm2
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmplesd %xmm2, %xmm1
-; STRICT-NEXT:    andnpd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ole_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ole_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ole_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm2, %xmm2
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmplesd %xmm2, %xmm1
+; CHECK-NEXT:    andnpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @ugt(double %x, double %y)  {
-; STRICT-LABEL: ugt:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ugt:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ugt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @ult(double %x, double %y)  {
-; STRICT-LABEL: ult:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ult:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ult:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @ugt_inverse(double %x, double %y)  {
-; STRICT-LABEL: ugt_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ugt_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ugt_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ugt_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @ult_inverse(double %x, double %y)  {
-; STRICT-LABEL: ult_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    cmpnlesd %xmm2, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ult_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ult_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ult_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    cmpnlesd %xmm2, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @uge(double %x, double %y)  {
-; STRICT-LABEL: uge:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: uge:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: uge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @ule(double %x, double %y)  {
-; STRICT-LABEL: ule:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ule:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ule:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, %y
   %d = select i1 %c, double %x, double %y
   ret double %d
 }
 
 define double @uge_inverse(double %x, double %y)  {
-; STRICT-LABEL: uge_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: uge_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: uge_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: uge_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @ule_inverse(double %x, double %y)  {
-; STRICT-LABEL: ule_inverse:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ule_inverse:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ule_inverse:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ule_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, %y
   %d = select i1 %c, double %y, double %x
   ret double %d
 }
 
 define double @ugt_x(double %x)  {
-; STRICT-LABEL: ugt_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmpnlesd %xmm1, %xmm2
-; STRICT-NEXT:    andpd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ugt_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ugt_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmpnlesd %xmm1, %xmm2
+; CHECK-NEXT:    andpd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @ult_x(double %x)  {
-; STRICT-LABEL: ult_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT:    andpd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ult_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ult_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT:    andpd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @ugt_inverse_x(double %x)  {
-; STRICT-LABEL: ugt_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm2, %xmm2
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmpnlesd %xmm2, %xmm1
-; STRICT-NEXT:    andnpd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ugt_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ugt_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ugt_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm2, %xmm2
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmpnlesd %xmm2, %xmm1
+; CHECK-NEXT:    andnpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @ult_inverse_x(double %x)  {
-; STRICT-LABEL: ult_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    cmpnlesd %xmm0, %xmm1
-; STRICT-NEXT:    andnpd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ult_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ult_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ult_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    cmpnlesd %xmm0, %xmm1
+; CHECK-NEXT:    andnpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @uge_x(double %x)  {
-; STRICT-LABEL: uge_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: uge_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    maxsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: uge_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @ule_x(double %x)  {
-; STRICT-LABEL: ule_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ule_x:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    xorpd %xmm1, %xmm1
-; RELAX-NEXT:    minsd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ule_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, 0.000000e+00
   %d = select i1 %c, double %x, double 0.000000e+00
   ret double %d
 }
 
 define double @uge_inverse_x(double %x)  {
-; STRICT-LABEL: uge_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    minsd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: uge_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    minsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: uge_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: uge_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    minsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @ule_inverse_x(double %x)  {
-; STRICT-LABEL: ule_inverse_x:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorpd %xmm1, %xmm1
-; STRICT-NEXT:    maxsd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ule_inverse_x:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorpd %xmm1, %xmm1
-; UNSAFE-NEXT:    maxsd %xmm1, %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ule_inverse_x:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    xorpd %xmm1, %xmm1
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ule_inverse_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    maxsd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, 0.000000e+00
   %d = select i1 %c, double 0.000000e+00, double %x
   ret double %d
 }
 
 define double @ogt_y(double %x)  {
-; ALL-LABEL: ogt_y:
-; ALL:       # %bb.0:
-; ALL-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: ogt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @olt_y(double %x)  {
-; ALL-LABEL: olt_y:
-; ALL:       # %bb.0:
-; ALL-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; ALL-NEXT:    retq
+; CHECK-LABEL: olt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @ogt_inverse_y(double %x)  {
-; STRICT-LABEL: ogt_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ogt_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ogt_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ogt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ogt double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @olt_inverse_y(double %x)  {
-; STRICT-LABEL: olt_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: olt_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: olt_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: olt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp olt double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @oge_y(double %x)  {
-; STRICT-LABEL: oge_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    cmplesd %xmm1, %xmm0
-; STRICT-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: oge_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: oge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    cmplesd %xmm1, %xmm0
+; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @ole_y(double %x)  {
-; STRICT-LABEL: ole_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ole_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ole_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @oge_inverse_y(double %x)  {
-; STRICT-LABEL: oge_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    cmplesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: oge_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: oge_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: oge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    cmplesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp oge double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @ole_inverse_y(double %x)  {
-; STRICT-LABEL: ole_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ole_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ole_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ole_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmplesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ole double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @ugt_y(double %x)  {
-; STRICT-LABEL: ugt_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ugt_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ugt_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @ult_y(double %x)  {
-; STRICT-LABEL: ult_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; STRICT-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
-; STRICT-NEXT:    movapd %xmm2, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ult_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ult_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; CHECK-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
+; CHECK-NEXT:    movapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @ugt_inverse_y(double %x)  {
-; STRICT-LABEL: ugt_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ugt_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ugt_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ugt_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    cmpnlesd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ugt double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @ult_inverse_y(double %x)  {
-; STRICT-LABEL: ult_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm1
-; STRICT-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    cmpnlesd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ult_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ult_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ult_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    cmpnlesd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ult double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @uge_y(double %x)  {
-; STRICT-LABEL: uge_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: uge_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: uge_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @ule_y(double %x)  {
-; STRICT-LABEL: ule_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: ule_y:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: ule_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, -0.000000e+00
   %d = select i1 %c, double %x, double -0.000000e+00
   ret double %d
 }
 
 define double @uge_inverse_y(double %x)  {
-; STRICT-LABEL: uge_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: uge_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: uge_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: uge_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp uge double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
 }
 
 define double @ule_inverse_y(double %x)  {
-; STRICT-LABEL: ule_inverse_y:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: ule_inverse_y:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: ule_inverse_y:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [-0.0E+0,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: ule_inverse_y:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %c = fcmp ule double %x, -0.000000e+00
   %d = select i1 %c, double -0.000000e+00, double %x
   ret double %d
@@ -988,332 +602,196 @@ define double @ule_inverse_y(double %x)  {
 ; Test a few more misc. cases.
 
 define double @clampTo3k_a(double %x)  {
-; STRICT-LABEL: clampTo3k_a:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_a:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_a:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_a:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp ogt double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_b(double %x)  {
-; STRICT-LABEL: clampTo3k_b:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_b:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_b:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp uge double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_c(double %x)  {
-; STRICT-LABEL: clampTo3k_c:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_c:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_c:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_c:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp olt double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_d(double %x)  {
-; STRICT-LABEL: clampTo3k_d:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_d:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_d:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_d:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp ule double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_e(double %x)  {
-; STRICT-LABEL: clampTo3k_e:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT:    maxsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_e:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_e:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_e:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    maxsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp olt double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_f(double %x)  {
-; STRICT-LABEL: clampTo3k_f:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_f:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_f:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    maxsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    maxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp ule double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_g(double %x)  {
-; STRICT-LABEL: clampTo3k_g:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; STRICT-NEXT:    minsd %xmm0, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_g:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_g:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_g:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
+; CHECK-NEXT:    minsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp ogt double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define double @clampTo3k_h(double %x)  {
-; STRICT-LABEL: clampTo3k_h:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: clampTo3k_h:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; UNSAFE-NEXT:    retq
-;
-; FINITE-LABEL: clampTo3k_h:
-; FINITE:       # %bb.0:
-; FINITE-NEXT:    movsd {{.*#+}} xmm1 = [3.0E+3,0.0E+0]
-; FINITE-NEXT:    minsd %xmm0, %xmm1
-; FINITE-NEXT:    movapd %xmm1, %xmm0
-; FINITE-NEXT:    retq
+; CHECK-LABEL: clampTo3k_h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-NEXT:    retq
   %t0 = fcmp uge double %x, 3.000000e+03
   %y = select i1 %t0, double 3.000000e+03, double %x
   ret double %y
 }
 
 define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y)  {
-; STRICT-LABEL: test_maxpd:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    cmplepd %xmm2, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_maxpd:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxpd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_maxpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    cmplepd %xmm2, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <2 x double> %x, %y
   %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %max
 }
 
 define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y)  {
-; STRICT-LABEL: test_minpd:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movapd %xmm0, %xmm2
-; STRICT-NEXT:    cmplepd %xmm1, %xmm0
-; STRICT-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movapd %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_minpd:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minpd %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_minpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movapd %xmm0, %xmm2
+; CHECK-NEXT:    cmplepd %xmm1, %xmm0
+; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <2 x double> %x, %y
   %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %min
 }
 
 define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y)  {
-; STRICT-LABEL: test_maxps:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    cmpleps %xmm2, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_maxps:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_maxps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    cmpleps %xmm2, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <4 x float> %x, %y
   %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %max
 }
 
 define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y)  {
-; STRICT-LABEL: test_minps:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    cmpleps %xmm1, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_minps:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_minps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    cmpleps %xmm1, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <4 x float> %x, %y
   %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %min
 }
 
 define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
-; STRICT-LABEL: test_maxps_illegal_v2f32:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    cmpleps %xmm2, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_maxps_illegal_v2f32:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_maxps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    cmpleps %xmm2, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <2 x float> %x, %y
   %max = select <2 x i1> %max_is_x, <2 x float> %x, <2 x float> %y
   ret <2 x float> %max
 }
 
 define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y)  {
-; STRICT-LABEL: test_minps_illegal_v2f32:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    cmpleps %xmm1, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_minps_illegal_v2f32:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_minps_illegal_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    cmpleps %xmm1, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <2 x float> %x, %y
   %min = select <2 x i1> %min_is_x, <2 x float> %x, <2 x float> %y
   ret <2 x float> %min
 }
 
 define <3 x float> @test_maxps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
-; STRICT-LABEL: test_maxps_illegal_v3f32:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    cmpleps %xmm2, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_maxps_illegal_v3f32:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    maxps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_maxps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    cmpleps %xmm2, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <3 x float> %x, %y
   %max = select <3 x i1> %max_is_x, <3 x float> %x, <3 x float> %y
   ret <3 x float> %max
 }
 
 define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
-; STRICT-LABEL: test_minps_illegal_v3f32:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    movaps %xmm0, %xmm2
-; STRICT-NEXT:    cmpleps %xmm1, %xmm0
-; STRICT-NEXT:    blendvps %xmm0, %xmm2, %xmm1
-; STRICT-NEXT:    movaps %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; RELAX-LABEL: test_minps_illegal_v3f32:
-; RELAX:       # %bb.0:
-; RELAX-NEXT:    minps %xmm1, %xmm0
-; RELAX-NEXT:    retq
+; CHECK-LABEL: test_minps_illegal_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    cmpleps %xmm1, %xmm0
+; CHECK-NEXT:    blendvps %xmm0, %xmm2, %xmm1
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <3 x float> %x, %y
   %min = select <3 x i1> %min_is_x, <3 x float> %x, <3 x float> %y
   ret <3 x float> %min
@@ -1322,10 +800,10 @@ define <3 x float> @test_minps_illegal_v3f32(<3 x float> %x, <3 x float> %y)  {
 ; OSS-Fuzz #13838
 ; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13838
 define float @ossfuzz13838(float %x) {
-; ALL-LABEL: ossfuzz13838:
-; ALL:       # %bb.0: # %bb
-; ALL-NEXT:    movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
-; ALL-NEXT:    retq
+; CHECK-LABEL: ossfuzz13838:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = [2.55E+2,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT:    retq
 bb:
   %cmp2 = fcmp fast olt float %x, 2.550000e+02
   %B1 = urem i1 %cmp2, %cmp2

>From 6fb77dc68a2d9218de71ab0413fb990332071f27 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 27 Oct 2025 16:31:46 +0800
Subject: [PATCH 4/6] Revert change in isKnownNeverNaN

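For reference, a minimal sketch (hypothetical IR, not part of this patch) of the pattern the reverted check matched: a value whose users all carry nnan, which the removed code in isKnownNeverNaN treated as known-never-NaN even though the value itself carries no nnan flag.

; Hypothetical example: every user of %v is an fcmp annotated with nnan,
; so the reverted check reported %v as never NaN.
define i1 @all_users_nnan(float %a, float %b, float %c) {
  %v = fadd float %a, %b
  %lo = fcmp nnan olt float %v, %c
  %hi = fcmp nnan ogt float %v, %c
  %r = and i1 %lo, %hi
  ret i1 %r
}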
---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9e8336e3508a0..379242ec5a157 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5869,12 +5869,6 @@ bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN,
                            ? APInt::getAllOnes(VT.getVectorNumElements())
                            : APInt(1, 1);
 
-  // If all users of this operand is annotated with nnan, we can assume
-  // this operand is not NaN, since nnan also affects inputs.
-  if (llvm::all_of(Op->users(),
-                   [](const SDNode *N) { return N->getFlags().hasNoNaNs(); }))
-    return true;
-
   return isKnownNeverNaN(Op, DemandedElts, SNaN, Depth);
 }
 

>From d7cbb38d8d013d500a37f9cb3fb5113bb9f26655 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Mon, 27 Oct 2025 19:08:55 +0800
Subject: [PATCH 5/6] Revert tests

---
 .../AArch64/neon-compare-instructions.ll      | 111 +++-----
 .../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 261 ++++++++++++------
 2 files changed, 223 insertions(+), 149 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index a82ead2406945..11b3b62ec1c8d 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -3249,51 +3249,36 @@ define <2 x i64> @fcmone2xdouble_fast(<2 x double> %A, <2 x double> %B) {
 }
 
 define <2 x i32> @fcmord2xfloat_fast(<2 x float> %A, <2 x float> %B) {
-; CHECK-SD-LABEL: fcmord2xfloat_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.2s, v0.2s, v0.2s
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmord2xfloat_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.2s, v0.2s, v1.2s
-; CHECK-GI-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
-; CHECK-GI-NEXT:    orr v0.8b, v0.8b, v2.8b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmord2xfloat_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.2s, v0.2s, v1.2s
+; CHECK-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast ord <2 x float> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @fcmord4xfloat_fast(<4 x float> %A, <4 x float> %B) {
-; CHECK-SD-LABEL: fcmord4xfloat_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.4s, v0.4s, v0.4s
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmord4xfloat_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmord4xfloat_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast ord <4 x float> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @fcmord2xdouble_fast(<2 x double> %A, <2 x double> %B) {
-; CHECK-SD-LABEL: fcmord2xdouble_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.2d, v0.2d, v0.2d
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmord2xdouble_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
-; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmord2xdouble_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.2d, v0.2d, v1.2d
+; CHECK-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast ord <2 x double> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
@@ -3301,57 +3286,39 @@ define <2 x i64> @fcmord2xdouble_fast(<2 x double> %A, <2 x double> %B) {
 
 
 define <2 x i32> @fcmuno2xfloat_fast(<2 x float> %A, <2 x float> %B) {
-; CHECK-SD-LABEL: fcmuno2xfloat_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.2s, v0.2s, v0.2s
-; CHECK-SD-NEXT:    mvn v0.8b, v0.8b
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmuno2xfloat_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.2s, v0.2s, v1.2s
-; CHECK-GI-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
-; CHECK-GI-NEXT:    orr v0.8b, v0.8b, v2.8b
-; CHECK-GI-NEXT:    mvn v0.8b, v0.8b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmuno2xfloat_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.2s, v0.2s, v1.2s
+; CHECK-NEXT:    fcmgt v0.2s, v1.2s, v0.2s
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast uno <2 x float> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @fcmuno4xfloat_fast(<4 x float> %A, <4 x float> %B) {
-; CHECK-SD-LABEL: fcmuno4xfloat_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.4s, v0.4s, v0.4s
-; CHECK-SD-NEXT:    mvn v0.16b, v0.16b
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmuno4xfloat_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
-; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmuno4xfloat_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    fcmgt v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast uno <4 x float> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @fcmuno2xdouble_fast(<2 x double> %A, <2 x double> %B) {
-; CHECK-SD-LABEL: fcmuno2xdouble_fast:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fcmeq v0.2d, v0.2d, v0.2d
-; CHECK-SD-NEXT:    mvn v0.16b, v0.16b
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fcmuno2xdouble_fast:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    fcmge v2.2d, v0.2d, v1.2d
-; CHECK-GI-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
-; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fcmuno2xdouble_fast:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmge v2.2d, v0.2d, v1.2d
+; CHECK-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    ret
   %tmp3 = fcmp fast uno <2 x double> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 70eac30c508b9..62f6d2c35181f 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -866,13 +866,15 @@ define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
   ret i1 %and1
 }
 
+; TODO: Check fast-flags from call in is_canonicalized*.
 define i1 @test58_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test58_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ugt double %arg1, %arg3
   %cmp2 = fcmp nnan ugt double %arg2, %arg3
@@ -898,9 +900,10 @@ define i1 @test59_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test59_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan uge float %arg1, %arg3
   %cmp2 = fcmp nnan uge float %arg2, %arg3
@@ -926,9 +929,10 @@ define i1 @test60_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test60_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_le_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ule float %arg1, %arg3
   %cmp2 = fcmp nnan ule float %arg2, %arg3
@@ -954,9 +958,10 @@ define i1 @test61_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test61_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult double %arg1, %arg3
   %cmp2 = fcmp nnan ult double %arg2, %arg3
@@ -1125,6 +1130,7 @@ define i1 @test70_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test70_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1194,6 +1200,7 @@ define i1 @test73_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test73_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1263,6 +1270,7 @@ define i1 @test75_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test75_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1296,6 +1304,7 @@ define i1 @test76_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test76_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1376,9 +1385,10 @@ define i1 @test79_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test79_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult float %arg1, %arg3
   %cmp2 = fcmp nnan ugt float %arg3, %arg2
@@ -1461,9 +1471,10 @@ define i1 @test83_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %var1 = call float @llvm.canonicalize.f32(float %arg1)
   %var2 = call float @llvm.canonicalize.f32(float %arg2)
@@ -1505,7 +1516,9 @@ define i1 @test84_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test84_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1513,6 +1526,8 @@ define i1 @test84_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test84_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1563,6 +1578,8 @@ define <2 x i1> @test85_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test85_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1573,6 +1590,8 @@ define <2 x i1> @test85_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test85_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1627,6 +1646,8 @@ define <2 x i1> @test86_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test86_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1637,6 +1658,8 @@ define <2 x i1> @test86_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test86_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1685,7 +1708,9 @@ define i1 @test87_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test87_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1693,6 +1718,8 @@ define i1 @test87_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test87_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1743,6 +1770,8 @@ define <2 x i1> @test88_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test88_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1753,6 +1782,8 @@ define <2 x i1> @test88_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test88_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1801,7 +1832,9 @@ define i1 @test89_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test89_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1809,6 +1842,8 @@ define i1 @test89_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test89_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1853,7 +1888,9 @@ define i1 @test90_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test90_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
 ; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1861,6 +1898,8 @@ define i1 @test90_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test90_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1911,6 +1950,8 @@ define <2 x i1> @test91_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test91_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1921,6 +1962,8 @@ define <2 x i1> @test91_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test91_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
+; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -2343,9 +2386,12 @@ define i1 @test108_nnan(float %arg1, float %arg2, float %arg3, float %C) {
 ; GCN-LABEL: test108_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max3_f32 v0, v0, v1, v2
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v3
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v3
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_and_b32 s0, s0, s1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult float %arg1, %C
   %cmp2 = fcmp nnan ult float %arg2, %C
@@ -2381,10 +2427,13 @@ define i1 @test109_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test109_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v1, v4
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v4
+; GCN-NEXT:    v_cmp_gt_f32_e64 s1, v2, v4
+; GCN-NEXT:    v_cmp_gt_f32_e64 s2, v3, v4
 ; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s1, s1, s2
+; GCN-NEXT:    s_or_b32 s0, s0, s1
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %C
@@ -2457,12 +2506,20 @@ define i1 @test111_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test111_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
-; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v4
-; GCN-NEXT:    v_min3_f32 v0, v5, v6, v0
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v3, v8
+; GCN-NEXT:    s_or_b32 s3, vcc_lo, s0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v4, v8
+; GCN-NEXT:    s_or_b32 s2, s1, s2
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v5, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v6, v8
+; GCN-NEXT:    s_or_b32 s2, s3, s2
+; GCN-NEXT:    s_or_b32 s2, s2, vcc_lo
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s0, s0, s2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %C
   %cmp2 = fcmp nnan olt float %arg2, %C
@@ -2518,12 +2575,20 @@ define i1 @test112_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test112_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
-; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v4
-; GCN-NEXT:    v_min3_f32 v0, v5, v6, v0
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v8
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v3, v8
+; GCN-NEXT:    s_or_b32 s3, vcc_lo, s0
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v4, v8
+; GCN-NEXT:    s_or_b32 s2, s1, s2
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v5, v8
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v6, v8
+; GCN-NEXT:    s_or_b32 s2, s3, s2
+; GCN-NEXT:    s_or_b32 s2, s2, vcc_lo
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s0, s0, s2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %C
   %cmp2 = fcmp nnan olt float %arg2, %C
@@ -2566,9 +2631,12 @@ define i1 @test113_nnan(float %arg1, float %arg2, float %arg3, float %C) {
 ; GCN-LABEL: test113_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_maxmin_f32 v0, v0, v1, v2
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v3
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v3
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult float %arg1, %C
   %cmp2 = fcmp nnan ult float %arg2, %C
@@ -2601,10 +2669,11 @@ define i1 @test114_nnan(float %arg1, float %arg2, float %arg3, float %C) {
 ; GCN-LABEL: test114_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
-; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v2, v3
-; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v0, v3
-; GCN-NEXT:    s_and_b32 s0, s0, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v0, v3
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v1, v3
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v3
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_and_b32 s0, s0, s1
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ogt float %arg1, %C
@@ -2643,10 +2712,14 @@ define i1 @test115_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test115_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v2, v2, v3
-; GCN-NEXT:    v_min3_f32 v0, v0, v1, v2
+; GCN-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v4
+; GCN-NEXT:    v_max_f32_e32 v2, v2, v3
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v4
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %C
   %cmp2 = fcmp nnan olt float %arg2, %C
@@ -2708,17 +2781,25 @@ define i1 @test116_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test116_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v8, v8, v9
-; GCN-NEXT:    v_dual_max_f32 v2, v2, v3 :: v_dual_min_f32 v3, v4, v5
-; GCN-NEXT:    v_max_f32_e32 v4, v6, v7
-; GCN-NEXT:    v_min3_f32 v0, v0, v1, v8
-; GCN-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v2, v10
-; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v3, v10
-; GCN-NEXT:    v_cmp_gt_f32_e64 s1, v4, v10
-; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v10
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_or_b32 s1, s2, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s1, v2, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s2, v3, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s3, v4, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s4, v5, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s5, v6, v10
+; GCN-NEXT:    v_cmp_gt_f32_e64 s6, v7, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s7, v8, v10
+; GCN-NEXT:    v_cmp_lt_f32_e64 s8, v9, v10
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s1, s1, s2
+; GCN-NEXT:    s_or_b32 s2, s3, s4
+; GCN-NEXT:    s_or_b32 s3, s5, s6
+; GCN-NEXT:    s_or_b32 s4, s7, s8
 ; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s1, s2, s3
+; GCN-NEXT:    s_or_b32 s0, s4, s0
+; GCN-NEXT:    s_or_b32 s0, s1, s0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %C
@@ -2795,16 +2876,27 @@ define i1 @test117_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test117_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v6, v6, v7
-; GCN-NEXT:    v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v10, v11
-; GCN-NEXT:    v_min_f32_e32 v2, v2, v3
-; GCN-NEXT:    v_min3_f32 v3, v4, v5, v6
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v12
-; GCN-NEXT:    v_min3_f32 v0, v8, v9, v1
-; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v2, v13
-; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v3, v13
-; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v0, v12
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v12
+; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s2, v3, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s3, v4, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s4, v5, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s5, v6, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s6, v7, v13
+; GCN-NEXT:    v_cmp_lt_f32_e64 s7, v8, v12
+; GCN-NEXT:    v_cmp_lt_f32_e64 s8, v9, v12
+; GCN-NEXT:    v_cmp_lt_f32_e64 s9, v10, v12
+; GCN-NEXT:    v_cmp_lt_f32_e64 s10, v11, v12
 ; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    s_or_b32 s1, s1, s2
+; GCN-NEXT:    s_or_b32 s2, s3, s4
+; GCN-NEXT:    s_or_b32 s3, s5, s6
+; GCN-NEXT:    s_or_b32 s4, s7, s8
+; GCN-NEXT:    s_or_b32 s5, s9, s10
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_or_b32 s1, s2, s3
+; GCN-NEXT:    s_or_b32 s2, s4, s5
 ; GCN-NEXT:    s_or_b32 s0, s0, s1
 ; GCN-NEXT:    s_or_b32 s0, s2, s0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
@@ -3178,9 +3270,10 @@ define i1 @test134_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test134_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %arg3
   %cmp2 = fcmp nnan ogt float %arg3, %arg2
@@ -3207,9 +3300,10 @@ define i1 @test135_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test135_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult float %arg1, %arg3
   %cmp2 = fcmp nnan ugt float %arg3, %arg2
@@ -3276,6 +3370,7 @@ define i1 @test137_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test137_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3307,9 +3402,10 @@ define i1 @test138_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test138_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan olt float %arg1, %arg3
   %cmp2 = fcmp nnan olt float %arg2, %arg3
@@ -3336,9 +3432,10 @@ define i1 @test139_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test139_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ole double %arg1, %arg3
   %cmp2 = fcmp nnan ole double %arg2, %arg3
@@ -3365,9 +3462,10 @@ define i1 @test140_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test140_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ogt double %arg1, %arg3
   %cmp2 = fcmp nnan ogt double %arg2, %arg3
@@ -3394,9 +3492,10 @@ define i1 @test141_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test141_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_and_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan oge float %arg1, %arg3
   %cmp2 = fcmp nnan oge float %arg2, %arg3
@@ -3423,9 +3522,10 @@ define i1 @test142_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test142_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ugt double %arg1, %arg3
   %cmp2 = fcmp nnan ugt double %arg2, %arg3
@@ -3452,9 +3552,10 @@ define i1 @test143_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test143_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan uge float %arg1, %arg3
   %cmp2 = fcmp nnan uge float %arg2, %arg3
@@ -3481,9 +3582,10 @@ define i1 @test144_nnan(float %arg1, float %arg2, float %arg3) #0 {
 ; GCN-LABEL: test144_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_le_f32_e64 s0, v1, v2
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ule float %arg1, %arg3
   %cmp2 = fcmp nnan ule float %arg2, %arg3
@@ -3510,9 +3612,10 @@ define i1 @test145_nnan(double %arg1, double %arg2, double %arg3) #0 {
 ; GCN-LABEL: test145_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
 ; GCN-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GCN-NEXT:    v_cmp_lt_f64_e64 s0, v[2:3], v[4:5]
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = fcmp nnan ult double %arg1, %arg3
   %cmp2 = fcmp nnan ult double %arg2, %arg3
@@ -3542,6 +3645,7 @@ define i1 @test146_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test146_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3650,6 +3754,7 @@ define i1 @test149_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test149_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3721,6 +3826,7 @@ define i1 @test151_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test151_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3755,6 +3861,7 @@ define i1 @test152_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test152_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo

>From 9049ac317e0ab2987a177e4aea255093a28c5d10 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Wed, 29 Oct 2025 19:28:17 +0800
Subject: [PATCH 6/6] Fix AMDGPU fcanonicalize selection

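As a rough illustration (hypothetical IR, not one of the tests touched below), the copy-fold this pattern change is about fires when fcanonicalize's input is already canonical; the simplest such input is another canonicalize, since isCanonicalized returns true for ISD::FCANONICALIZE.

; Hypothetical example: the inner canonicalize already produces a canonical
; value, so the outer one should select to a plain register copy.
declare float @llvm.canonicalize.f32(float)

define float @canon_of_canon(float %a) {
  %inner = call float @llvm.canonicalize.f32(float %a)
  %outer = call float @llvm.canonicalize.f32(float %inner)
  ret float %outer
}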
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  5 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.h       |  2 +-
 llvm/lib/Target/AMDGPU/SIInstrInfo.td         |  8 +--
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  5 +-
 .../CodeGen/AMDGPU/combine_andor_with_cmps.ll | 54 +++----------------
 5 files changed, 16 insertions(+), 58 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index be4229155c983..b137baccf46bc 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -14306,6 +14306,7 @@ SDValue SITargetLowering::performRcpCombine(SDNode *N,
 }
 
 bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
+                                       SDNodeFlags UserFlags,
                                        unsigned MaxDepth) const {
   unsigned Opcode = Op.getOpcode();
   if (Opcode == ISD::FCANONICALIZE)
@@ -14313,7 +14314,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
 
   if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     const auto &F = CFP->getValueAPF();
-    if (F.isNaN() && F.isSignaling())
+    if ((UserFlags.hasNoNaNs() || F.isNaN()) && F.isSignaling())
       return false;
     if (!F.isDenormal())
       return true;
@@ -14505,7 +14506,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
 
   // FIXME: denormalsEnabledForType is broken for dynamic
   return denormalsEnabledForType(DAG, Op.getValueType()) &&
-         DAG.isKnownNeverSNaN(Op);
+         (UserFlags.hasNoNaNs() || DAG.isKnownNeverSNaN(Op));
 }
 
 bool SITargetLowering::isCanonicalized(Register Reg, const MachineFunction &MF,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 74e58f4272e10..13b4facc12b18 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -555,7 +555,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
                            Register N1) const override;
 
   bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
-                       unsigned MaxDepth = 5) const;
+                       SDNodeFlags UserFlags = {}, unsigned MaxDepth = 5) const;
   bool isCanonicalized(Register Reg, const MachineFunction &MF,
                        unsigned MaxDepth = 5) const;
   bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index b7f63eceb5d5c..44122a5c845c4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1001,11 +1001,13 @@ def MFMALdScaleXForm : SDNodeXForm<timm, [{
   return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32);
 }]>;
 
-def is_canonicalized : PatLeaf<(fAny srcvalue:$src), [{
-  const SITargetLowering &Lowering =
+def fcanonicalize_canonicalized
+    : PatFrag<(ops node:$op), (fcanonicalize node:$op), [{
+    const SITargetLowering &Lowering =
       *static_cast<const SITargetLowering *>(getTargetLowering());
-  return Lowering.isCanonicalized(*CurDAG, Op);
+    return Lowering.isCanonicalized(*CurDAG, Op->getOperand(0), N->getFlags());
 }]> {
+  // FIXME: GlobalISel is dead code.
   let GISelPredicateCode = [{
     const SITargetLowering *TLI = static_cast<const SITargetLowering *>(
         MF.getSubtarget().getTargetLowering());
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 6f1feb1dc2996..60422d3aae568 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3459,10 +3459,7 @@ def : GCNPat<
 // If fcanonicalize's operand is implicitly canonicalized, we only need a copy.
 let AddedComplexity = 8 in {
 foreach vt = [f16, v2f16, f32, v2f32, f64] in {
-  def : GCNPat<
-    (fcanonicalize (vt is_canonicalized:$src)),
-    (COPY vt:$src)
-  >;
+  def : GCNPat<(fcanonicalize_canonicalized vt:$src), (COPY vt:$src)>;
 }
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 62f6d2c35181f..3b4dcfdc4c3af 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -1130,7 +1130,6 @@ define i1 @test70_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test70_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1200,7 +1199,6 @@ define i1 @test73_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test73_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1270,7 +1268,6 @@ define i1 @test75_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test75_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1304,7 +1301,6 @@ define i1 @test76_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test76_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1516,9 +1512,7 @@ define i1 @test84_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test84_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
-; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v0.h
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1526,8 +1520,6 @@ define i1 @test84_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test84_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1578,8 +1570,6 @@ define <2 x i1> @test85_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test85_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1590,8 +1580,6 @@ define <2 x i1> @test85_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test85_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1646,8 +1634,6 @@ define <2 x i1> @test86_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test86_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1658,8 +1644,6 @@ define <2 x i1> @test86_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test86_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1708,9 +1692,7 @@ define i1 @test87_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test87_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
 ; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1718,8 +1700,6 @@ define i1 @test87_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test87_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1770,8 +1750,6 @@ define <2 x i1> @test88_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test88_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_min_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1782,8 +1760,6 @@ define <2 x i1> @test88_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test88_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_min_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -1832,9 +1808,7 @@ define i1 @test89_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test89_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
-; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v0.h
+; GFX11-TRUE16-NEXT:    v_min_f16_e32 v0.l, v0.l, v1.l
 ; GFX11-TRUE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1842,8 +1816,6 @@ define i1 @test89_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test89_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_min_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1888,9 +1860,7 @@ define i1 @test90_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-TRUE16-LABEL: test90_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.h, v1.l, v1.l
-; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v0.h
+; GFX11-TRUE16-NEXT:    v_max_f16_e32 v0.l, v0.l, v1.l
 ; GFX11-TRUE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
@@ -1898,8 +1868,6 @@ define i1 @test90_nnan(half %arg1, half %arg2, half %arg3) {
 ; GFX11-FAKE16-LABEL: test90_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_max_f16_e32 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_max_f16_e32 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1950,8 +1918,6 @@ define <2 x i1> @test91_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-TRUE16-LABEL: test91_nnan:
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-TRUE16-NEXT:    v_pk_max_f16 v1, v0, v1
 ; GFX11-TRUE16-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v1.l, v2.l
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -1962,8 +1928,6 @@ define <2 x i1> @test91_nnan(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg
 ; GFX11-FAKE16-LABEL: test91_nnan:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v0
-; GFX11-FAKE16-NEXT:    v_pk_max_f16 v1, v1, v1
 ; GFX11-FAKE16-NEXT:    v_pk_max_f16 v0, v0, v1
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
@@ -2712,12 +2676,11 @@ define i1 @test115_nnan(float %arg1, float %arg2, float %arg3, float %arg4, floa
 ; GCN-LABEL: test115_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GCN-NEXT:    v_max_f32_e32 v2, v2, v3
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v4
 ; GCN-NEXT:    v_cmp_lt_f32_e64 s0, v1, v4
-; GCN-NEXT:    v_max_f32_e32 v2, v2, v3
-; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
 ; GCN-NEXT:    v_cmp_lt_f32_e64 s1, v2, v4
+; GCN-NEXT:    s_or_b32 s0, vcc_lo, s0
 ; GCN-NEXT:    s_or_b32 s0, s0, s1
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -3370,7 +3333,6 @@ define i1 @test137_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test137_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3645,7 +3607,6 @@ define i1 @test146_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test146_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3754,7 +3715,6 @@ define i1 @test149_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test149_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3826,7 +3786,6 @@ define i1 @test151_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test151_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
@@ -3861,7 +3820,6 @@ define i1 @test152_nnan(float %arg1, float %arg2, float %arg3) {
 ; GCN-LABEL: test152_nnan:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
 ; GCN-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GCN-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
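
A note on the *_nnan updates above: the dropped `v_max_* vN, vN, vN` / `v_pk_max_f16 vN, vN, vN` instructions are how the AMDGPU backend quiets/canonicalizes the operands of IEEE-style min/max, and with the min/max and compare known `nnan` that quieting becomes unnecessary. A minimal IR sketch of the pattern these tests exercise (function and value names here are illustrative, not taken from the test file):

  define i1 @sketch_nnan(float %a, float %b, float %c) {
    ; maxnum of the two inputs, then an ordered compare; both carry nnan
    %m = call nnan float @llvm.maxnum.f32(float %a, float %b)
    %cmp = fcmp nnan olt float %m, %c
    ret i1 %cmp
  }
  declare float @llvm.maxnum.f32(float, float)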

More information about the llvm-commits mailing list