[llvm] 55de46f - [PowerPC] Support constrained fp operation for setcc

QingShan Zhang via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 6 22:18:05 PDT 2020


Author: QingShan Zhang
Date: 2020-08-07T05:16:36Z
New Revision: 55de46f3b2c5651b06e4739209907f14ab781d89

URL: https://github.com/llvm/llvm-project/commit/55de46f3b2c5651b06e4739209907f14ab781d89
DIFF: https://github.com/llvm/llvm-project/commit/55de46f3b2c5651b06e4739209907f14ab781d89.diff

LOG: [PowerPC] Support constrained fp operation for setcc

The constrained fp operation fcmp was added by https://reviews.llvm.org/D69281.
This patch is trying to add the support for PowerPC backend.

Reviewed By: uweigand

Differential Revision: https://reviews.llvm.org/D81727

Added: 
    llvm/test/CodeGen/PowerPC/fp-strict-fcmp.ll

Modified: 
    llvm/include/llvm/Target/TargetSelectionDAG.td
    llvm/lib/Target/PowerPC/P9InstrResources.td
    llvm/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/lib/Target/PowerPC/PPCInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index de809bb10d499..4b88f1a132857 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -553,6 +553,8 @@ def strict_sint_to_fp : SDNode<"ISD::STRICT_SINT_TO_FP",
                                SDTIntToFPOp, [SDNPHasChain]>;
 def strict_uint_to_fp : SDNode<"ISD::STRICT_UINT_TO_FP",
                                SDTIntToFPOp, [SDNPHasChain]>;
+def strict_fsetcc  : SDNode<"ISD::STRICT_FSETCC",  SDTSetCC, [SDNPHasChain]>;
+def strict_fsetccs : SDNode<"ISD::STRICT_FSETCCS", SDTSetCC, [SDNPHasChain]>;
 
 def setcc      : SDNode<"ISD::SETCC"      , SDTSetCC>;
 def select     : SDNode<"ISD::SELECT"     , SDTSelect>;
@@ -1420,6 +1422,12 @@ def any_sint_to_fp : PatFrags<(ops node:$src),
 def any_uint_to_fp : PatFrags<(ops node:$src),
                               [(strict_uint_to_fp node:$src),
                                (uint_to_fp node:$src)]>;
+def any_fsetcc : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
+                          [(strict_fsetcc node:$lhs, node:$rhs, node:$pred),
+                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
+def any_fsetccs : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
+                          [(strict_fsetccs node:$lhs, node:$rhs, node:$pred),
+                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
 
 multiclass binary_atomic_op_ord<SDNode atomic_op> {
   def NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val),

diff --git a/llvm/lib/Target/PowerPC/P9InstrResources.td b/llvm/lib/Target/PowerPC/P9InstrResources.td
index d7e3519d5539e..63531f72adfbc 100644
--- a/llvm/lib/Target/PowerPC/P9InstrResources.td
+++ b/llvm/lib/Target/PowerPC/P9InstrResources.td
@@ -94,7 +94,7 @@ def : InstRW<[P9_ALU_3C, IP_EXEC_1C, DISP_3SLOTS_1C],
     (instregex "CMPRB(8)?$"),
     (instregex "TD(I)?$"),
     (instregex "TW(I)?$"),
-    (instregex "FCMPU(S|D)$"),
+    (instregex "FCMP(O|U)(S|D)$"),
     (instregex "XSTSTDC(S|D)P$"),
     FTDIV,
     FTSQRT,

diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 0ebc8a99b8ea9..1786edf9e1986 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -419,6 +419,16 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
   if (!Subtarget.useCRBits())
     setOperationAction(ISD::SETCC, MVT::i32, Custom);
 
+  if (Subtarget.hasFPU()) {
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
+
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
+  }
+
   // PowerPC does not have BRCOND which requires SetCC
   if (!Subtarget.useCRBits())
     setOperationAction(ISD::BRCOND, MVT::Other, Expand);

diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 771a715926fd7..ac91f26b4e03f 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -2570,14 +2570,17 @@ let isCompare = 1, hasSideEffects = 0 in {
 }
 }
 let PPC970_Unit = 3, Predicates = [HasFPU] in {  // FPU Operations.
-//def FCMPO  : XForm_17<63, 32, (outs CRRC:$crD), (ins FPRC:$fA, FPRC:$fB),
-//                      "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
 let isCompare = 1, hasSideEffects = 0 in {
   def FCMPUS : XForm_17<63, 0, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB),
                         "fcmpu $crD, $fA, $fB", IIC_FPCompare>;
-  let Interpretation64Bit = 1, isCodeGenOnly = 1 in
-  def FCMPUD : XForm_17<63, 0, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
-                        "fcmpu $crD, $fA, $fB", IIC_FPCompare>;
+  def FCMPOS : XForm_17<63, 32, (outs crrc:$crD), (ins f4rc:$fA, f4rc:$fB),
+                        "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
+  let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+    def FCMPUD : XForm_17<63, 0, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
+                          "fcmpu $crD, $fA, $fB", IIC_FPCompare>;
+    def FCMPOD : XForm_17<63, 32, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
+                          "fcmpo $crD, $fA, $fB", IIC_FPCompare>;
+  }
 }
 
 def FTDIV: XForm_17<63, 128, (outs crrc:$crD), (ins f8rc:$fA, f8rc:$fB),
@@ -3934,14 +3937,27 @@ multiclass FSetCCPat<SDNode SetCC, ValueType Ty, PatLeaf FCmp> {
 }
 
 let Predicates = [HasFPU] in {
+// FCMPU: If either of the operands is a Signaling NaN, then VXSNAN is set.
 // SETCC for f32.
-defm : FSetCCPat<setcc, f32, FCMPUS>;
+defm : FSetCCPat<any_fsetcc, f32, FCMPUS>;
 
 // SETCC for f64.
-defm : FSetCCPat<setcc, f64, FCMPUD>;
+defm : FSetCCPat<any_fsetcc, f64, FCMPUD>;
 
 // SETCC for f128.
-defm : FSetCCPat<setcc, f128, XSCMPUQP>;
+defm : FSetCCPat<any_fsetcc, f128, XSCMPUQP>;
+
+// FCMPO: If either of the operands is a Signaling NaN, then VXSNAN is set and,
+// if neither operand is a Signaling NaN but at least one operand is a Quiet NaN,
+// then VXVC is set.
+// SETCCS for f32.
+defm : FSetCCPat<strict_fsetccs, f32, FCMPOS>;
+
+// SETCCS for f64.
+defm : FSetCCPat<strict_fsetccs, f64, FCMPOD>;
+
+// SETCCS for f128.
+defm : FSetCCPat<strict_fsetccs, f128, XSCMPOQP>;
 }
 
 // This must be in this file because it relies on patterns defined in this file

diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-fcmp.ll b/llvm/test/CodeGen/PowerPC/fp-strict-fcmp.ll
new file mode 100644
index 0000000000000..868ad7c09ff8d
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-fcmp.ll
@@ -0,0 +1,2699 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s \
+; RUN:   -check-prefix=P8
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 \
+; RUN:   | FileCheck %s -check-prefix=P9
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
+; RUN:   FileCheck %s -check-prefix=NOVSX
+
+define i32 @test_f32_oeq_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_oeq_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iseleq r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_oeq_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iseleq r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_oeq_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"oeq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ogt_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ogt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iselgt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ogt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iselgt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ogt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ogt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_oge_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_oge_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, lt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_oge_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, lt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_oge_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, lt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"oge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_olt_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_olt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isellt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_olt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isellt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_olt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"olt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ole_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ole_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, gt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ole_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, gt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ole_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, gt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ole",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_one_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_one_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, eq
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_one_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, eq
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_one_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, eq
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"one",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ord_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ord_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isel r3, r4, r3, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ord_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isel r3, r4, r3, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ord_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r4, r3, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ord",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ueq_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ueq_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, eq, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ueq_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ueq_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, eq, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ueq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ugt_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ugt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, gt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ugt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ugt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, gt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ugt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_uge_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_uge_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isellt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_uge_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_uge_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"uge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ult_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ult_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, lt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ult_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ult_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, lt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ult",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ule_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ule_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iselgt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ule_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ule_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"ule",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_une_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_une_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iseleq r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_une_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_une_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"une",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_uno_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_uno_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isel r3, r3, r4, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_uno_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isel r3, r3, r4, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_uno_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r3, r4, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
+                                               float %f1, float %f2, metadata !"uno",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_oeq_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_oeq_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iseleq r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_oeq_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iseleq r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_oeq_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"oeq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ogt_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ogt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iselgt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ogt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iselgt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ogt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ogt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_oge_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_oge_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, lt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_oge_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, lt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_oge_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, lt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"oge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_olt_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_olt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isellt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_olt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isellt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_olt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"olt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ole_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ole_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, gt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ole_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, gt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ole_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, gt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ole",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_one_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_one_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, eq
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_one_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, eq
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_one_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, eq
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"one",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ord_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ord_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isel r3, r4, r3, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ord_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isel r3, r4, r3, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ord_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r4, r3, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ord",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ueq_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ueq_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, eq, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ueq_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ueq_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, eq, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ueq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ugt_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ugt_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, gt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ugt_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ugt_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, gt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ugt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_uge_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_uge_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isellt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_uge_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_uge_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"uge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ult_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ult_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, lt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ult_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ult_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, lt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ult",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ule_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ule_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iselgt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ule_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ule_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"ule",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_une_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_une_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    iseleq r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_une_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_une_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"une",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_uno_q(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_uno_q:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpu cr0, f1, f2
+; P8-NEXT:    isel r3, r3, r4, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_uno_q:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpu cr0, f1, f2
+; P9-NEXT:    isel r3, r3, r4, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_uno_q:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpu cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r3, r4, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
+                                               double %f1, double %f2, metadata !"uno",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_oeq_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_oeq_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iseleq r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_oeq_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iseleq r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_oeq_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"oeq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ogt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ogt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iselgt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ogt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iselgt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ogt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ogt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_oge_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_oge_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, lt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_oge_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, lt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_oge_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, lt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"oge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_olt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_olt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isellt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_olt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isellt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_olt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"olt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ole_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ole_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, gt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ole_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, gt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ole_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, gt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ole",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_one_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_one_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, eq
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_one_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, eq
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_one_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, eq
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"one",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ord_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ord_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isel r3, r4, r3, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ord_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isel r3, r4, r3, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ord_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r4, r3, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ord",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ueq_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ueq_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, eq, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ueq_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ueq_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, eq, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ueq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ugt_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ugt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, gt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ugt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ugt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, gt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ugt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_uge_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_uge_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isellt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_uge_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_uge_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"uge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ult_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ult_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, lt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ult_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ult_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, lt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ult",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_ule_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_ule_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iselgt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_ule_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_ule_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"ule",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_une_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_une_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iseleq r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_une_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_une_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"une",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f32_uno_s(i32 %a, i32 %b, float %f1, float %f2) #0 {
+; P8-LABEL: test_f32_uno_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isel r3, r3, r4, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f32_uno_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isel r3, r3, r4, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f32_uno_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r3, r4, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
+                                               float %f1, float %f2, metadata !"uno",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_oeq_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_oeq_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iseleq r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_oeq_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iseleq r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_oeq_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"oeq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ogt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ogt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iselgt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ogt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iselgt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ogt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ogt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_oge_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_oge_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, lt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_oge_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, lt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_oge_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, lt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"oge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_olt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_olt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isellt r3, r3, r4
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_olt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isellt r3, r3, r4
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_olt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r3, r4
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"olt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ole_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ole_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, gt
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ole_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, gt
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ole_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, gt
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ole",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_one_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_one_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    crnor 4*cr5+lt, un, eq
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_one_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    crnor 4*cr5+lt, un, eq
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_one_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    crnor 4*cr5+lt, un, eq
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"one",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ord_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ord_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isel r3, r4, r3, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ord_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isel r3, r4, r3, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ord_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r4, r3, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ord",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ueq_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ueq_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, eq, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ueq_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ueq_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, eq, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ueq",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ugt_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ugt_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, gt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ugt_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ugt_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, gt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ugt",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_uge_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_uge_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isellt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_uge_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_uge_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isellt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"uge",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ult_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ult_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    cror 4*cr5+lt, lt, un
+; P8-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ult_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    cror 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ult_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    cror 4*cr5+lt, lt, un
+; NOVSX-NEXT:    isel r3, r3, r4, 4*cr5+lt
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ult",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_ule_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_ule_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iselgt r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_ule_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_ule_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iselgt r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"ule",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_une_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_une_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    iseleq r3, r4, r3
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_une_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_une_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    iseleq r3, r4, r3
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"une",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @test_f64_uno_s(i32 %a, i32 %b, double %f1, double %f2) #0 {
+; P8-LABEL: test_f64_uno_s:
+; P8:       # %bb.0:
+; P8-NEXT:    fcmpo cr0, f1, f2
+; P8-NEXT:    isel r3, r3, r4, un
+; P8-NEXT:    blr
+;
+; P9-LABEL: test_f64_uno_s:
+; P9:       # %bb.0:
+; P9-NEXT:    fcmpo cr0, f1, f2
+; P9-NEXT:    isel r3, r3, r4, un
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: test_f64_uno_s:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    fcmpo cr0, f1, f2
+; NOVSX-NEXT:    isel r3, r3, r4, un
+; NOVSX-NEXT:    blr
+  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
+                                               double %f1, double %f2, metadata !"uno",
+                                               metadata !"fpexcept.strict") #0
+  %res = select i1 %cond, i32 %a, i32 %b
+  ret i32 %res
+}
+
+define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_olt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __ltkf2
+; P8-NEXT:    nop
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_olt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_olt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __ltkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ole_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ole_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __lekf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ole_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    xscmpuqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, gt, 4*cr1+gt
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ole_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __lekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ogt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ogt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gtkf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ogt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ogt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gtkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_oge_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gekf2
+; P8-NEXT:    nop
+; P8-NEXT:    not r3, r3
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_oge_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    xscmpuqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, lt, 4*cr1+lt
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_oge_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    not r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oeq_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_oeq_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_oeq_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_oeq_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_one_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_one_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -160(r1)
+; P8-NEXT:    std r26, 112(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r27, 120(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r28, 128(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r29, 136(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r29, r5
+; P8-NEXT:    mr r28, r4
+; P8-NEXT:    mr r27, r3
+; P8-NEXT:    std r30, 144(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r30, r6
+; P8-NEXT:    bl __unordkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    mr r4, r28
+; P8-NEXT:    mr r5, r29
+; P8-NEXT:    mr r6, r30
+; P8-NEXT:    srwi r26, r3, 5
+; P8-NEXT:    mr r3, r27
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    ld r30, 144(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r29, 136(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r28, 128(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r27, 120(r1) # 8-byte Folded Reload
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    and r3, r26, r3
+; P8-NEXT:    ld r26, 112(r1) # 8-byte Folded Reload
+; P8-NEXT:    addi r1, r1, 160
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_one_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    xscmpuqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, eq, 4*cr1+eq
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_one_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -80(r1)
+; NOVSX-NEXT:    mr r30, r6
+; NOVSX-NEXT:    mr r29, r5
+; NOVSX-NEXT:    mr r28, r4
+; NOVSX-NEXT:    mr r27, r3
+; NOVSX-NEXT:    bl __unordkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    mr r4, r28
+; NOVSX-NEXT:    mr r5, r29
+; NOVSX-NEXT:    mr r6, r30
+; NOVSX-NEXT:    srwi r26, r3, 5
+; NOVSX-NEXT:    mr r3, r27
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    and r3, r26, r3
+; NOVSX-NEXT:    addi r1, r1, 80
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ult_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gekf2
+; P8-NEXT:    nop
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ult_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ult_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ule_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ule_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gtkf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ule_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    iselgt r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ule_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gtkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ugt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ugt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __lekf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ugt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ugt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __lekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_uge_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __ltkf2
+; P8-NEXT:    nop
+; P8-NEXT:    not r3, r3
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_uge_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    isellt r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_uge_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __ltkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    not r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ueq_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_ueq_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -160(r1)
+; P8-NEXT:    std r26, 112(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r27, 120(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r28, 128(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r29, 136(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r29, r5
+; P8-NEXT:    mr r28, r4
+; P8-NEXT:    mr r27, r3
+; P8-NEXT:    std r30, 144(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r30, r6
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    mr r4, r28
+; P8-NEXT:    mr r5, r29
+; P8-NEXT:    mr r6, r30
+; P8-NEXT:    srwi r26, r3, 5
+; P8-NEXT:    mr r3, r27
+; P8-NEXT:    bl __unordkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    ld r30, 144(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r29, 136(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r28, 128(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r27, 120(r1) # 8-byte Folded Reload
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    or r3, r3, r26
+; P8-NEXT:    ld r26, 112(r1) # 8-byte Folded Reload
+; P8-NEXT:    addi r1, r1, 160
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_ueq_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_ueq_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -80(r1)
+; NOVSX-NEXT:    mr r30, r6
+; NOVSX-NEXT:    mr r29, r5
+; NOVSX-NEXT:    mr r28, r4
+; NOVSX-NEXT:    mr r27, r3
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    mr r4, r28
+; NOVSX-NEXT:    mr r5, r29
+; NOVSX-NEXT:    mr r6, r30
+; NOVSX-NEXT:    srwi r26, r3, 5
+; NOVSX-NEXT:    mr r3, r27
+; NOVSX-NEXT:    bl __unordkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    or r3, r3, r26
+; NOVSX-NEXT:    addi r1, r1, 80
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_une_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmp_une_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __nekf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmp_une_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpuqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    iseleq r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmp_une_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __nekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_olt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __ltkf2
+; P8-NEXT:    nop
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_olt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    isellt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_olt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __ltkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ole_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ole_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __lekf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ole_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    xscmpoqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, gt, 4*cr1+gt
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ole_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __lekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ogt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ogt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gtkf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ogt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    iselgt r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ogt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gtkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_oge_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gekf2
+; P8-NEXT:    nop
+; P8-NEXT:    not r3, r3
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_oge_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    xscmpoqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, lt, 4*cr1+lt
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_oge_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    not r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oeq_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_oeq_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_oeq_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 0
+; P9-NEXT:    li r4, 1
+; P9-NEXT:    iseleq r3, r4, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_oeq_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_one_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_one_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -160(r1)
+; P8-NEXT:    std r26, 112(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r27, 120(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r28, 128(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r29, 136(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r29, r5
+; P8-NEXT:    mr r28, r4
+; P8-NEXT:    mr r27, r3
+; P8-NEXT:    std r30, 144(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r30, r6
+; P8-NEXT:    bl __unordkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    mr r4, r28
+; P8-NEXT:    mr r5, r29
+; P8-NEXT:    mr r6, r30
+; P8-NEXT:    srwi r26, r3, 5
+; P8-NEXT:    mr r3, r27
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    ld r30, 144(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r29, 136(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r28, 128(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r27, 120(r1) # 8-byte Folded Reload
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    and r3, r26, r3
+; P8-NEXT:    ld r26, 112(r1) # 8-byte Folded Reload
+; P8-NEXT:    addi r1, r1, 160
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_one_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    xscmpoqp cr1, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, un, 4*cr1+un
+; P9-NEXT:    crnor 4*cr5+gt, eq, 4*cr1+eq
+; P9-NEXT:    crnand 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_one_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -80(r1)
+; NOVSX-NEXT:    mr r30, r6
+; NOVSX-NEXT:    mr r29, r5
+; NOVSX-NEXT:    mr r28, r4
+; NOVSX-NEXT:    mr r27, r3
+; NOVSX-NEXT:    bl __unordkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    mr r4, r28
+; NOVSX-NEXT:    mr r5, r29
+; NOVSX-NEXT:    mr r6, r30
+; NOVSX-NEXT:    srwi r26, r3, 5
+; NOVSX-NEXT:    mr r3, r27
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    and r3, r26, r3
+; NOVSX-NEXT:    addi r1, r1, 80
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ult_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gekf2
+; P8-NEXT:    nop
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ult_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, lt, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ult_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ule_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ule_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __gtkf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ule_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    iselgt r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ule_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __gtkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ugt_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ugt_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __lekf2
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    neg r3, r3
+; P8-NEXT:    rldicl r3, r3, 1, 63
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ugt_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, gt, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ugt_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __lekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    neg r3, r3
+; NOVSX-NEXT:    rldicl r3, r3, 1, 63
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_uge_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __ltkf2
+; P8-NEXT:    nop
+; P8-NEXT:    not r3, r3
+; P8-NEXT:    srwi r3, r3, 31
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_uge_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    isellt r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_uge_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __ltkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    not r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 31
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ueq_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_ueq_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -160(r1)
+; P8-NEXT:    std r26, 112(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r27, 120(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r28, 128(r1) # 8-byte Folded Spill
+; P8-NEXT:    std r29, 136(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r29, r5
+; P8-NEXT:    mr r28, r4
+; P8-NEXT:    mr r27, r3
+; P8-NEXT:    std r30, 144(r1) # 8-byte Folded Spill
+; P8-NEXT:    mr r30, r6
+; P8-NEXT:    bl __eqkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    mr r4, r28
+; P8-NEXT:    mr r5, r29
+; P8-NEXT:    mr r6, r30
+; P8-NEXT:    srwi r26, r3, 5
+; P8-NEXT:    mr r3, r27
+; P8-NEXT:    bl __unordkf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    ld r30, 144(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r29, 136(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r28, 128(r1) # 8-byte Folded Reload
+; P8-NEXT:    ld r27, 120(r1) # 8-byte Folded Reload
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    or r3, r3, r26
+; P8-NEXT:    ld r26, 112(r1) # 8-byte Folded Reload
+; P8-NEXT:    addi r1, r1, 160
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_ueq_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    crnor 4*cr5+lt, eq, un
+; P9-NEXT:    isel r3, 0, r3, 4*cr5+lt
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_ueq_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -80(r1)
+; NOVSX-NEXT:    mr r30, r6
+; NOVSX-NEXT:    mr r29, r5
+; NOVSX-NEXT:    mr r28, r4
+; NOVSX-NEXT:    mr r27, r3
+; NOVSX-NEXT:    bl __eqkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    mr r4, r28
+; NOVSX-NEXT:    mr r5, r29
+; NOVSX-NEXT:    mr r6, r30
+; NOVSX-NEXT:    srwi r26, r3, 5
+; NOVSX-NEXT:    mr r3, r27
+; NOVSX-NEXT:    bl __unordkf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    or r3, r3, r26
+; NOVSX-NEXT:    addi r1, r1, 80
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 {
+; P8-LABEL: fcmps_une_f128:
+; P8:       # %bb.0:
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    bl __nekf2
+; P8-NEXT:    nop
+; P8-NEXT:    cntlzw r3, r3
+; P8-NEXT:    srwi r3, r3, 5
+; P8-NEXT:    xori r3, r3, 1
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fcmps_une_f128:
+; P9:       # %bb.0:
+; P9-NEXT:    xscmpoqp cr0, v2, v3
+; P9-NEXT:    li r3, 1
+; P9-NEXT:    iseleq r3, 0, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: fcmps_une_f128:
+; NOVSX:       # %bb.0:
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    bl __nekf2
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    cntlzw r3, r3
+; NOVSX-NEXT:    srwi r3, r3, 5
+; NOVSX-NEXT:    xori r3, r3, 1
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+attributes #0 = { strictfp nounwind }
+
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)


        


More information about the llvm-commits mailing list