[llvm] 63b17eb - [RISCV] Add strictfp support for compares.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 11 20:35:03 PST 2022


Author: Craig Topper
Date: 2022-01-11T20:01:41-08:00
New Revision: 63b17eb9ec106540ee5cb91d285027512072a050

URL: https://github.com/llvm/llvm-project/commit/63b17eb9ec106540ee5cb91d285027512072a050
DIFF: https://github.com/llvm/llvm-project/commit/63b17eb9ec106540ee5cb91d285027512072a050.diff

LOG: [RISCV] Add strictfp support for compares.

This adds support for STRICT_FSETCC (quiet) and STRICT_FSETCCS (signaling).

FEQ matches well to STRICT_FSETCC oeq.
FLT/FLE match well to STRICT_FSETCCS olt/ole.

Others require commuting operands or multiple instructions.
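
For reference, these nodes come from the constrained compare
intrinsics; a minimal IR sketch in the style of the added tests:

  %quiet  = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict")
  %signal = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict")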

STRICT_FSETCC olt/ole/ogt/oge/ult/ule/ugt/uge uses FLT/FLE,
but we need to save/restore FFLAGS around them to avoid spurious
exceptions. I've implemented pseudo instructions with a
CustomInserter to insert the save/restore CSR instructions.
Unfortunately, this doesn't honor exceptions for signaling NaNs,
but I'm not sure whether signaling NaNs are really supported by the
constrained intrinsics.
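
For example, a quiet olt double compare now lowers to the sequence
below (taken from the added double-fcmp-strict.ll test); the trailing
FEQ into x0 is the dummy compare emitted by the custom inserter:

  frflags a1
  flt.d   a0, fa0, fa1
  fsflags a1
  feq.d   zero, fa0, fa1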

STRICT_FSETCC one and ueq expand to a pair of FLT instructions
with a save/restore of FFLAGS around each. This could be improved
in the future.
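
For example, the double 'one' case in the new test currently expands
to two independent quiet FLT sequences ORed together; one frflags
before both FLTs and one fsflags after them would be enough:

  frflags a0
  flt.d   a1, fa0, fa1
  fsflags a0
  feq.d   zero, fa0, fa1
  frflags a0
  flt.d   a2, fa1, fa0
  fsflags a0
  or      a0, a2, a1
  feq.d   zero, fa1, fa0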

There may be some opportunities to generate better code for strict
comparisons mixed with nonans fast math flags. I've left FIXMEs in
the .td files for that.

Co-Authored-by: ShihPo Hung <shihpo.hung at sifive.com>

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D116694

Added: 
    llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/half-fcmp-strict.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrFormats.td
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVInstrInfoF.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 5dfb65ef131a9..54481b94fdd8d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3593,9 +3593,16 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
     if (Legalized) {
       // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
       // condition code, create a new SETCC node.
-      if (Tmp3.getNode())
-        Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
-                           Tmp1, Tmp2, Tmp3, Node->getFlags());
+      if (Tmp3.getNode()) {
+        if (IsStrict) {
+          Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getVTList(),
+                             {Chain, Tmp1, Tmp2, Tmp3}, Node->getFlags());
+          Chain = Tmp1.getValue(1);
+        } else {
+          Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Tmp1,
+                             Tmp2, Tmp3, Node->getFlags());
+        }
+      }
 
       // If we expanded the SETCC by inverting the condition code, then wrap
       // the existing SETCC in a NOT to restore the intended condition.

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f237cd93329a7..a185e59ef6d88 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -343,6 +343,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
     for (auto CC : FPCCToExpand)
       setCondCodeAction(CC, MVT::f16, Expand);
     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
@@ -393,6 +395,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
     for (auto CC : FPCCToExpand)
       setCondCodeAction(CC, MVT::f32, Expand);
     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
@@ -426,6 +430,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
     for (auto CC : FPCCToExpand)
       setCondCodeAction(CC, MVT::f64, Expand);
     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
@@ -8070,6 +8076,42 @@ static bool isSelectPseudo(MachineInstr &MI) {
   }
 }
 
+static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
+                                        unsigned RelOpcode, unsigned EqOpcode,
+                                        const RISCVSubtarget &Subtarget) {
+  DebugLoc DL = MI.getDebugLoc();
+  Register DstReg = MI.getOperand(0).getReg();
+  Register Src1Reg = MI.getOperand(1).getReg();
+  Register Src2Reg = MI.getOperand(2).getReg();
+  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+  Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
+
+  // Save the current FFLAGS.
+  BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
+
+  auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
+                 .addReg(Src1Reg)
+                 .addReg(Src2Reg);
+  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
+    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
+
+  // Restore the FFLAGS.
+  BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
+      .addReg(SavedFFlags, RegState::Kill);
+
+  // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
+  auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
+                  .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
+                  .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
+  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
+    MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
+
+  // Erase the pseudoinstruction.
+  MI.eraseFromParent();
+  return BB;
+}
+
 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                            MachineBasicBlock *BB,
                                            const RISCVSubtarget &Subtarget) {
@@ -8211,6 +8253,18 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return emitBuildPairF64Pseudo(MI, BB);
   case RISCV::SplitF64Pseudo:
     return emitSplitF64Pseudo(MI, BB);
+  case RISCV::PseudoQuietFLE_H:
+    return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
+  case RISCV::PseudoQuietFLT_H:
+    return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
+  case RISCV::PseudoQuietFLE_S:
+    return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
+  case RISCV::PseudoQuietFLT_S:
+    return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
+  case RISCV::PseudoQuietFLE_D:
+    return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
+  case RISCV::PseudoQuietFLT_D:
+    return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
   }
 }
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 6a16b6354f954..f99d0f56c4065 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -206,6 +206,13 @@ class Pseudo<dag outs, dag ins, list<dag> pattern, string opcodestr = "", string
   let isCodeGenOnly = 1;
 }
 
+class PseudoQuietFCMP<RegisterClass Ty>
+    : Pseudo<(outs GPR:$rd), (ins Ty:$rs1, Ty:$rs2), []> {
+  let hasSideEffects = 1;
+  let mayLoad = 0;
+  let mayStore = 0;
+}
+
 // Pseudo load instructions.
 class PseudoLoad<string opcodestr, RegisterClass rdty = GPR>
     : Pseudo<(outs rdty:$rd), (ins bare_symbol:$addr), [], opcodestr, "$rd, $addr"> {

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 3ca9ad8c6ec5f..9a61ec2f51dbd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1369,6 +1369,10 @@ def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>;
 def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
 def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
 
+let hasSideEffects = true in {
+def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
+def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
+}
 /// Other pseudo-instructions
 
 // Pessimistically assume the stack pointer will be clobbered

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index b7601df013a64..e43ec89007455 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -158,6 +158,10 @@ def : InstAlias<"fge.d $rd, $rs, $rt",
 
 def PseudoFLD  : PseudoFloatLoad<"fld", FPR64>;
 def PseudoFSD  : PseudoStore<"fsd", FPR64>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_D : PseudoQuietFCMP<FPR64>;
+def PseudoQuietFLT_D : PseudoQuietFCMP<FPR64>;
+}
 } // Predicates = [HasStdExtD]
 
 //===----------------------------------------------------------------------===//
@@ -222,13 +226,34 @@ def : PatFpr64Fpr64<fminnum, FMIN_D>;
 def : PatFpr64Fpr64<fmaxnum, FMAX_D>;
 
 /// Setcc
-
-def : PatFpr64Fpr64<seteq, FEQ_D>;
-def : PatFpr64Fpr64<setoeq, FEQ_D>;
-def : PatFpr64Fpr64<setlt, FLT_D>;
-def : PatFpr64Fpr64<setolt, FLT_D>;
-def : PatFpr64Fpr64<setle, FLE_D>;
-def : PatFpr64Fpr64<setole, FLE_D>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
+// strict versions of those.
+
+// Match non-signaling FEQ_D
+def : PatSetCC<FPR64, any_fsetcc, SETEQ, FEQ_D>;
+def : PatSetCC<FPR64, any_fsetcc, SETOEQ, FEQ_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETLT, PseudoQuietFLT_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETOLT, PseudoQuietFLT_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETLE, PseudoQuietFLE_D>;
+def : PatSetCC<FPR64, strict_fsetcc, SETOLE, PseudoQuietFLE_D>;
+
+// Match signaling FEQ_D
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
+          (AND (FLE_D $rs1, $rs2),
+               (FLE_D $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETOEQ),
+          (AND (FLE_D $rs1, $rs2),
+               (FLE_D $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETEQ),
+          (FLE_D $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ),
+          (FLE_D $rs1, $rs1)>;
+
+def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D>;
+def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D>;
 
 def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index e183095d9183f..ce70844f187c8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -309,6 +309,10 @@ def : MnemonicAlias<"fmv.x.s", "fmv.x.w">;
 
 def PseudoFLW  : PseudoFloatLoad<"flw", FPR32>;
 def PseudoFSW  : PseudoStore<"fsw", FPR32>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_S : PseudoQuietFCMP<FPR32>;
+def PseudoQuietFLT_S : PseudoQuietFCMP<FPR32>;
+}
 } // Predicates = [HasStdExtF]
 
 //===----------------------------------------------------------------------===//
@@ -319,6 +323,9 @@ def PseudoFSW  : PseudoStore<"fsw", FPR32>;
 def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
 
 /// Generic pattern classes
+class PatSetCC<RegisterClass Ty, SDPatternOperator OpNode, CondCode Cond, RVInst Inst>
+    : Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+
 class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
     : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
 
@@ -373,13 +380,34 @@ def : PatFpr32Fpr32<fminnum, FMIN_S>;
 def : PatFpr32Fpr32<fmaxnum, FMAX_S>;
 
 /// Setcc
-
-def : PatFpr32Fpr32<seteq, FEQ_S>;
-def : PatFpr32Fpr32<setoeq, FEQ_S>;
-def : PatFpr32Fpr32<setlt, FLT_S>;
-def : PatFpr32Fpr32<setolt, FLT_S>;
-def : PatFpr32Fpr32<setle, FLE_S>;
-def : PatFpr32Fpr32<setole, FLE_S>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
+// strict versions of those.
+
+// Match non-signaling FEQ_S
+def : PatSetCC<FPR32, any_fsetcc, SETEQ, FEQ_S>;
+def : PatSetCC<FPR32, any_fsetcc, SETOEQ, FEQ_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETLT, PseudoQuietFLT_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETOLT, PseudoQuietFLT_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETLE, PseudoQuietFLE_S>;
+def : PatSetCC<FPR32, strict_fsetcc, SETOLE, PseudoQuietFLE_S>;
+
+// Match signaling FEQ_S
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
+          (AND (FLE_S $rs1, $rs2),
+               (FLE_S $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETOEQ),
+          (AND (FLE_S $rs1, $rs2),
+               (FLE_S $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ),
+          (FLE_S $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
+          (FLE_S $rs1, $rs1)>;
+
+def : PatSetCC<FPR32, any_fsetccs, SETLT, FLT_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETOLT, FLT_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETLE, FLE_S>;
+def : PatSetCC<FPR32, any_fsetccs, SETOLE, FLE_S>;
 
 def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index d03293108630d..bffd39ae9f515 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -182,6 +182,10 @@ def : InstAlias<"fge.h $rd, $rs, $rt",
 let Predicates = [HasStdExtZfhmin] in {
 def PseudoFLH  : PseudoFloatLoad<"flh", FPR16>;
 def PseudoFSH  : PseudoStore<"fsh", FPR16>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_H : PseudoQuietFCMP<FPR16>;
+def PseudoQuietFLT_H : PseudoQuietFCMP<FPR16>;
+}
 } // Predicates = [HasStdExtZfhmin]
 
 //===----------------------------------------------------------------------===//
@@ -246,13 +250,34 @@ def : PatFpr16Fpr16<fminnum, FMIN_H>;
 def : PatFpr16Fpr16<fmaxnum, FMAX_H>;
 
 /// Setcc
-
-def : PatFpr16Fpr16<seteq, FEQ_H>;
-def : PatFpr16Fpr16<setoeq, FEQ_H>;
-def : PatFpr16Fpr16<setlt, FLT_H>;
-def : PatFpr16Fpr16<setolt, FLT_H>;
-def : PatFpr16Fpr16<setle, FLE_H>;
-def : PatFpr16Fpr16<setole, FLE_H>;
+// FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
+// strict versions of those.
+
+// Match non-signaling FEQ_H
+def : PatSetCC<FPR16, any_fsetcc, SETEQ, FEQ_H>;
+def : PatSetCC<FPR16, any_fsetcc, SETOEQ, FEQ_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETLT, PseudoQuietFLT_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETOLT, PseudoQuietFLT_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETLE, PseudoQuietFLE_H>;
+def : PatSetCC<FPR16, strict_fsetcc, SETOLE, PseudoQuietFLE_H>;
+
+// Match signaling FEQ_H
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ),
+          (AND (FLE_H $rs1, $rs2),
+               (FLE_H $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETOEQ),
+          (AND (FLE_H $rs1, $rs2),
+               (FLE_H $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETEQ),
+          (FLE_H $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ),
+          (FLE_H $rs1, $rs1)>;
+
+def : PatSetCC<FPR16, any_fsetccs, SETLT, FLT_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETOLT, FLT_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETLE, FLE_H>;
+def : PatSetCC<FPR16, any_fsetccs, SETOLE, FLE_H>;
 
 def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR>;
 } // Predicates = [HasStdExtZfh]

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
new file mode 100644
index 0000000000000..15cf27319e328
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -0,0 +1,1281 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
+
+define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_oeq:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_oeq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_oeq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+
+define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ogt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a1
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
+; RV32IFD-NEXT:    fsflags a1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a1
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
+; RV64IFD-NEXT:    fsflags a1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ogt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtdf2 at plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ogt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtdf2 at plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_oge:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a1
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
+; RV32IFD-NEXT:    fsflags a1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a1
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
+; RV64IFD-NEXT:    fsflags a1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_oge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_oge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_olt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a1
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    fsflags a1
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a1
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    fsflags a1
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_olt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltdf2 at plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_olt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltdf2 at plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ole:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a1
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
+; RV32IFD-NEXT:    fsflags a1
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a1
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
+; RV64IFD-NEXT:    fsflags a1
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ole:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ledf2 at plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ole:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ledf2 at plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_one:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a1, fa0, fa1
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a2, fa1, fa0
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    or a0, a2, a1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a1, fa0, fa1
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a2, fa1, fa0
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    or a0, a2, a1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_one:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a3
+; RV32I-NEXT:    mv s3, a2
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    snez s4, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    mv a2, s3
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    and a0, a0, s4
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_one:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    snez s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    and a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ord:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ord:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ord:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ueq:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a1, fa0, fa1
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a2, fa1, fa0
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    or a0, a2, a1
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a1, fa0, fa1
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a2, fa1, fa0
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    or a0, a2, a1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ueq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a3
+; RV32I-NEXT:    mv s3, a2
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    seqz s4, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    mv a2, s3
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    or a0, a0, s4
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ueq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    seqz s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    or a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ugt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    fle.d a1, fa0, fa1
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    xori a0, a1, 1
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    fle.d a1, fa0, fa1
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    xori a0, a1, 1
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ugt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ledf2 at plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ugt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ledf2 at plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_uge:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a1, fa0, fa1
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    xori a0, a1, 1
+; RV32IFD-NEXT:    feq.d zero, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a1, fa0, fa1
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    xori a0, a1, 1
+; RV64IFD-NEXT:    feq.d zero, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_uge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltdf2 at plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_uge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltdf2 at plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ult:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    fle.d a1, fa1, fa0
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    xori a0, a1, 1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    fle.d a1, fa1, fa0
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    xori a0, a1, 1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ult:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ult:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_ule:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    frflags a0
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
+; RV32IFD-NEXT:    fsflags a0
+; RV32IFD-NEXT:    xori a0, a1, 1
+; RV32IFD-NEXT:    feq.d zero, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    frflags a0
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
+; RV64IFD-NEXT:    fsflags a0
+; RV64IFD-NEXT:    xori a0, a1, 1
+; RV64IFD-NEXT:    feq.d zero, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ule:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtdf2 at plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ule:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtdf2 at plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_une:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    feq.d a0, fa0, fa1
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    feq.d a0, fa0, fa1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_une:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __nedf2 at plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_une:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __nedf2 at plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmp_uno:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    feq.d a0, fa1, fa1
+; RV32IFD-NEXT:    feq.d a1, fa0, fa0
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    feq.d a0, fa1, fa1
+; RV64IFD-NEXT:    feq.d a1, fa0, fa0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_uno:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_uno:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_oeq:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
+; RV32IFD-NEXT:    fle.d a1, fa0, fa1
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
+; RV64IFD-NEXT:    fle.d a1, fa0, fa1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_oeq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_oeq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+
+define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ogt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ogt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtdf2 at plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ogt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtdf2 at plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_oge:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_oge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_oge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_olt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_olt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltdf2 at plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_olt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltdf2 at plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ole:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ole:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ledf2 at plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ole:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ledf2 at plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_one:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
+; RV32IFD-NEXT:    or a0, a1, a0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
+; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_one:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a3
+; RV32I-NEXT:    mv s3, a2
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    snez s4, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    mv a2, s3
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    and a0, a0, s4
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_one:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    snez s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    and a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ord:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa1
+; RV32IFD-NEXT:    fle.d a1, fa0, fa0
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa1
+; RV64IFD-NEXT:    fle.d a1, fa0, fa0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ord:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ord:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ueq:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    flt.d a1, fa1, fa0
+; RV32IFD-NEXT:    or a0, a1, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    flt.d a1, fa1, fa0
+; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ueq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s2, a3
+; RV32I-NEXT:    mv s3, a2
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqdf2 at plt
+; RV32I-NEXT:    seqz s4, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    mv a2, s3
+; RV32I-NEXT:    mv a3, s2
+; RV32I-NEXT:    call __unorddf2 at plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    or a0, a0, s4
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ueq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqdf2 at plt
+; RV64I-NEXT:    seqz s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unorddf2 at plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    or a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ugt:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa0, fa1
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa0, fa1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ugt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ledf2 at plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ugt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ledf2 at plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_uge:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa0, fa1
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa0, fa1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_uge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltdf2 at plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_uge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltdf2 at plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ult:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ult:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gedf2 at plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ult:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gedf2 at plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_ule:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    flt.d a0, fa1, fa0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    flt.d a0, fa1, fa0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ule:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtdf2@plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ule:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtdf2@plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_une:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa0
+; RV32IFD-NEXT:    fle.d a1, fa0, fa1
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa0
+; RV64IFD-NEXT:    fle.d a1, fa0, fa1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_une:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __nedf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_une:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __nedf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
+; RV32IFD-LABEL: fcmps_uno:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fle.d a0, fa1, fa1
+; RV32IFD-NEXT:    fle.d a1, fa0, fa0
+; RV32IFD-NEXT:    and a0, a1, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmps_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fle.d a0, fa1, fa1
+; RV64IFD-NEXT:    fle.d a1, fa0, fa0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_uno:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unorddf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_uno:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unorddf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
new file mode 100644
index 0000000000000..4b2efdf5ed0cf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -0,0 +1,1249 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
+
+define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_oeq:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_oeq:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_oeq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_oeq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+
+define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ogt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a1
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
+; RV32IF-NEXT:    fsflags a1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ogt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a1
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
+; RV64IF-NEXT:    fsflags a1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ogt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ogt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_oge:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a1
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
+; RV32IF-NEXT:    fsflags a1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_oge:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a1
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
+; RV64IF-NEXT:    fsflags a1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_oge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_oge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_olt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a1
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    fsflags a1
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_olt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a1
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    fsflags a1
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_olt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_olt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ole:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a1
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
+; RV32IF-NEXT:    fsflags a1
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ole:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a1
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
+; RV64IF-NEXT:    fsflags a1
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ole:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ole:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_one:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a1, fa0, fa1
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a2, fa1, fa0
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    or a0, a2, a1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_one:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a1, fa0, fa1
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a2, fa1, fa0
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    or a0, a2, a1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_one:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    snez s2, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    and a0, a0, s2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_one:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    snez s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    and a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ord:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ord:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ord:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ord:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ueq:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a1, fa0, fa1
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a2, fa1, fa0
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    or a0, a2, a1
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ueq:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a1, fa0, fa1
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a2, fa1, fa0
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    or a0, a2, a1
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ueq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    seqz s2, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    or a0, a0, s2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ueq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    seqz s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    or a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ugt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    fle.s a1, fa0, fa1
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    xori a0, a1, 1
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ugt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    fle.s a1, fa0, fa1
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    xori a0, a1, 1
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ugt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ugt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_uge:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a1, fa0, fa1
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    xori a0, a1, 1
+; RV32IF-NEXT:    feq.s zero, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_uge:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a1, fa0, fa1
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    xori a0, a1, 1
+; RV64IF-NEXT:    feq.s zero, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_uge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_uge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ult:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    fle.s a1, fa1, fa0
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    xori a0, a1, 1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ult:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    fle.s a1, fa1, fa0
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    xori a0, a1, 1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ult:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ult:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_ule:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    frflags a0
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
+; RV32IF-NEXT:    fsflags a0
+; RV32IF-NEXT:    xori a0, a1, 1
+; RV32IF-NEXT:    feq.s zero, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_ule:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    frflags a0
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
+; RV64IF-NEXT:    fsflags a0
+; RV64IF-NEXT:    xori a0, a1, 1
+; RV64IF-NEXT:    feq.s zero, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_ule:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_ule:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_une:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    feq.s a0, fa0, fa1
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_une:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    feq.s a0, fa0, fa1
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_une:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __nesf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_une:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __nesf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmp_uno:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    feq.s a0, fa1, fa1
+; RV32IF-NEXT:    feq.s a1, fa0, fa0
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmp_uno:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    feq.s a0, fa1, fa1
+; RV64IF-NEXT:    feq.s a1, fa0, fa0
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmp_uno:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmp_uno:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_oeq:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
+; RV32IF-NEXT:    fle.s a1, fa0, fa1
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_oeq:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
+; RV64IF-NEXT:    fle.s a1, fa0, fa1
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_oeq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_oeq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+
+define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ogt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ogt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ogt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ogt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_oge:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_oge:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_oge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_oge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_olt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_olt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_olt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_olt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ole:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ole:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ole:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ole:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_one:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
+; RV32IF-NEXT:    or a0, a1, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_one:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
+; RV64IF-NEXT:    or a0, a1, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_one:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    snez s2, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    and a0, a0, s2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_one:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    snez s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    and a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ord:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa1
+; RV32IF-NEXT:    fle.s a1, fa0, fa0
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ord:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa1
+; RV64IF-NEXT:    fle.s a1, fa0, fa0
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ord:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ord:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ueq:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    flt.s a1, fa1, fa0
+; RV32IF-NEXT:    or a0, a1, a0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ueq:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    flt.s a1, fa1, fa0
+; RV64IF-NEXT:    or a0, a1, a0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ueq:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    call __eqsf2@plt
+; RV32I-NEXT:    seqz s2, a0
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    or a0, a0, s2
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ueq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    call __eqsf2@plt
+; RV64I-NEXT:    seqz s2, a0
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    or a0, a0, s2
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ugt:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa0, fa1
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ugt:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa0, fa1
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ugt:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __lesf2@plt
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ugt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __lesf2@plt
+; RV64I-NEXT:    sgtz a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_uge:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa0, fa1
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_uge:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa0, fa1
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_uge:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __ltsf2@plt
+; RV32I-NEXT:    li a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_uge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __ltsf2@plt
+; RV64I-NEXT:    li a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ult:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ult:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ult:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gesf2@plt
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ult:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gesf2@plt
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_ule:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    flt.s a0, fa1, fa0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_ule:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    flt.s a0, fa1, fa0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_ule:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __gtsf2@plt
+; RV32I-NEXT:    slti a0, a0, 1
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_ule:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __gtsf2@plt
+; RV64I-NEXT:    slti a0, a0, 1
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_une:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa0
+; RV32IF-NEXT:    fle.s a1, fa0, fa1
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_une:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa0
+; RV64IF-NEXT:    fle.s a1, fa0, fa1
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_une:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __nesf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_une:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __nesf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
+; RV32IF-LABEL: fcmps_uno:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fle.s a0, fa1, fa1
+; RV32IF-NEXT:    fle.s a1, fa0, fa0
+; RV32IF-NEXT:    and a0, a1, a0
+; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fcmps_uno:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fle.s a0, fa1, fa1
+; RV64IF-NEXT:    fle.s a1, fa0, fa0
+; RV64IF-NEXT:    and a0, a1, a0
+; RV64IF-NEXT:    xori a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fcmps_uno:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call __unordsf2@plt
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fcmps_uno:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call __unordsf2@plt
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}

diff  --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
new file mode 100644
index 0000000000000..5ca941479a4ad
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll
@@ -0,0 +1,573 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfh -verify-machineinstrs \
+; RUN:   -target-abi ilp32f -disable-strictnode-mutation < %s \
+; RUN:   | FileCheck -check-prefix=RV32IZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfh -verify-machineinstrs \
+; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s \
+; RUN:   | FileCheck -check-prefix=RV64IZFH %s
+
+define i32 @fcmp_oeq(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_oeq:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_oeq:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
+
+define i32 @fcmp_ogt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ogt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a1
+; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ogt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a1
+; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_oge:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a1
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_oge:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a1
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_olt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a1
+; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a1
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_olt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a1
+; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a1
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ole:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a1
+; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a1
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ole:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a1
+; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a1
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_one(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_one:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a2, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    or a0, a2, a1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_one:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a2, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    or a0, a2, a1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ord:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
+; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ord:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
+; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; FIXME: We only need one frflags before the two flts and one fsflags after the
+; two flts.
+define i32 @fcmp_ueq(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ueq:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a2, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    or a0, a2, a1
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ueq:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a2, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    or a0, a2, a1
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ugt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    xori a0, a1, 1
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ugt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    xori a0, a1, 1
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_uge:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    xori a0, a1, 1
+; RV32IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_uge:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a1, fa0, fa1
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    xori a0, a1, 1
+; RV64IZFH-NEXT:    feq.h zero, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ult:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    fle.h a1, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    xori a0, a1, 1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ult:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    fle.h a1, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    xori a0, a1, 1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_ule:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    frflags a0
+; RV32IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV32IZFH-NEXT:    fsflags a0
+; RV32IZFH-NEXT:    xori a0, a1, 1
+; RV32IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_ule:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    frflags a0
+; RV64IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV64IZFH-NEXT:    fsflags a0
+; RV64IZFH-NEXT:    xori a0, a1, 1
+; RV64IZFH-NEXT:    feq.h zero, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_une:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_une:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmp_uno:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
+; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmp_uno:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
+; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oeq(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_oeq:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV32IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_oeq:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV64IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)
+
+define i32 @fcmps_ogt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ogt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ogt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oge(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_oge:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_oge:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_olt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_olt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_olt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ole(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ole:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ole:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_one(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_one:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV32IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV32IZFH-NEXT:    or a0, a1, a0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_one:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV64IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV64IZFH-NEXT:    or a0, a1, a0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ord(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ord:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa1
+; RV32IZFH-NEXT:    fle.h a1, fa0, fa0
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ord:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa1
+; RV64IZFH-NEXT:    fle.h a1, fa0, fa0
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ueq(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ueq:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV32IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV32IZFH-NEXT:    or a0, a1, a0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ueq:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV64IZFH-NEXT:    flt.h a1, fa1, fa0
+; RV64IZFH-NEXT:    or a0, a1, a0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ugt(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ugt:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ugt:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uge(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_uge:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_uge:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ult(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ult:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ult:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ule(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_ule:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_ule:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_une(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_une:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV32IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_une:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
+; RV64IZFH-NEXT:    fle.h a1, fa0, fa1
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uno(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fcmps_uno:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fle.h a0, fa1, fa1
+; RV32IZFH-NEXT:    fle.h a1, fa0, fa0
+; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    xori a0, a0, 1
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: fcmps_uno:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fle.h a0, fa1, fa1
+; RV64IZFH-NEXT:    fle.h a1, fa0, fa0
+; RV64IZFH-NEXT:    and a0, a1, a0
+; RV64IZFH-NEXT:    xori a0, a0, 1
+; RV64IZFH-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}