[llvm] a6d699b - [LoongArch] Add codegen support for strict_fsetccs and any_fsetcc

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 25 22:06:48 PDT 2022


Author: gonglingqin
Date: 2022-09-26T13:05:36+08:00
New Revision: a6d699b55df3ca83b25fd0c0898dd51bb4b61e9c

URL: https://github.com/llvm/llvm-project/commit/a6d699b55df3ca83b25fd0c0898dd51bb4b61e9c
DIFF: https://github.com/llvm/llvm-project/commit/a6d699b55df3ca83b25fd0c0898dd51bb4b61e9c.diff

LOG: [LoongArch] Add codegen support for strict_fsetccs and any_fsetcc

Differential Revision: https://reviews.llvm.org/D134274

Added: 
    llvm/test/CodeGen/LoongArch/double-fcmp-strict.ll
    llvm/test/CodeGen/LoongArch/double-fcmps-strict.ll
    llvm/test/CodeGen/LoongArch/float-fcmp-strict.ll
    llvm/test/CodeGen/LoongArch/float-fcmps-strict.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index d9993e0a616f..9a901593c523 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -157,11 +157,8 @@ def : Pat<(fcanonicalize FPR32:$fj), (FMAX_S $fj, $fj)>;
 
 // Match non-signaling comparison
 
-// TODO: change setcc to any_fsetcc after call is supported because
-// we need to call llvm.experimental.constrained.fcmp.f32 in testcase.
-// See RISCV float-fcmp-strict.ll for reference.
 class PatFPSetcc<CondCode cc, LAInst CmpInst, RegisterClass RegTy>
-    : Pat<(setcc RegTy:$fj, RegTy:$fk, cc),
+    : Pat<(any_fsetcc RegTy:$fj, RegTy:$fk, cc),
           (MOVCF2GR (CmpInst RegTy:$fj, RegTy:$fk))>;
 // SETOGT/SETOGE/SETUGT/SETUGE will expand into SETOLT/SETOLE/SETULT/SETULE.
 def : PatFPSetcc<SETOEQ, FCMP_CEQ_S,  FPR32>;
@@ -196,7 +193,22 @@ defm : PatFPBrcond<SETUNE, FCMP_CUNE_S, FPR32>;
 defm : PatFPBrcond<SETUO,  FCMP_CUN_S, FPR32>;
 defm : PatFPBrcond<SETLT,  FCMP_CLT_S, FPR32>;
 
-// TODO: Match signaling comparison strict_fsetccs with FCMP_S*_S instructions.
+// Match signaling comparison
+
+class PatStrictFsetccs<CondCode cc, LAInst CmpInst, RegisterClass RegTy>
+    : Pat<(strict_fsetccs RegTy:$fj, RegTy:$fk, cc),
+          (MOVCF2GR (CmpInst RegTy:$fj, RegTy:$fk))>;
+def : PatStrictFsetccs<SETOEQ, FCMP_SEQ_S,  FPR32>;
+def : PatStrictFsetccs<SETOLT, FCMP_SLT_S,  FPR32>;
+def : PatStrictFsetccs<SETOLE, FCMP_SLE_S,  FPR32>;
+def : PatStrictFsetccs<SETONE, FCMP_SNE_S,  FPR32>;
+def : PatStrictFsetccs<SETO,   FCMP_SOR_S,  FPR32>;
+def : PatStrictFsetccs<SETUEQ, FCMP_SUEQ_S, FPR32>;
+def : PatStrictFsetccs<SETULT, FCMP_SULT_S, FPR32>;
+def : PatStrictFsetccs<SETULE, FCMP_SULE_S, FPR32>;
+def : PatStrictFsetccs<SETUNE, FCMP_SUNE_S, FPR32>;
+def : PatStrictFsetccs<SETUO,  FCMP_SUN_S,  FPR32>;
+def : PatStrictFsetccs<SETLT,  FCMP_SLT_S,  FPR32>;
 
 /// Select
 

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index 42a02514ffac..9fb9b99d32f3 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -168,10 +168,6 @@ def : Pat<(fcanonicalize FPR64:$fj), (FMAX_D $fj, $fj)>;
 
 // Match non-signaling comparison
 
-// TODO: Change setcc to any_fsetcc after call is supported because
-// we need to call llvm.experimental.constrained.fcmp.f64 in testcase.
-// See RISCV float-fcmp-strict.ll for reference.
-
 // SETOGT/SETOGE/SETUGT/SETUGE will expand into SETOLT/SETOLE/SETULT/SETULE.
 def : PatFPSetcc<SETOEQ, FCMP_CEQ_D,  FPR64>;
 def : PatFPSetcc<SETOLT, FCMP_CLT_D,  FPR64>;
@@ -197,7 +193,19 @@ defm : PatFPBrcond<SETUNE, FCMP_CUNE_D, FPR64>;
 defm : PatFPBrcond<SETUO,  FCMP_CUN_D, FPR64>;
 defm : PatFPBrcond<SETLT,  FCMP_CLT_D, FPR64>;
 
-// TODO: Match signaling comparison strict_fsetccs with FCMP_S*_D instructions.
+// Match signaling comparison
+
+def : PatStrictFsetccs<SETOEQ, FCMP_SEQ_D,  FPR64>;
+def : PatStrictFsetccs<SETOLT, FCMP_SLT_D,  FPR64>;
+def : PatStrictFsetccs<SETOLE, FCMP_SLE_D,  FPR64>;
+def : PatStrictFsetccs<SETONE, FCMP_SNE_D,  FPR64>;
+def : PatStrictFsetccs<SETO,   FCMP_SOR_D,  FPR64>;
+def : PatStrictFsetccs<SETUEQ, FCMP_SUEQ_D, FPR64>;
+def : PatStrictFsetccs<SETULT, FCMP_SULT_D, FPR64>;
+def : PatStrictFsetccs<SETULE, FCMP_SULE_D, FPR64>;
+def : PatStrictFsetccs<SETUNE, FCMP_SUNE_D, FPR64>;
+def : PatStrictFsetccs<SETUO,  FCMP_SUN_D,  FPR64>;
+def : PatStrictFsetccs<SETLT,  FCMP_SLT_D,  FPR64>;
 
 /// Select
 

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 920a9da58b85..c2b1b443444e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -118,11 +118,15 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FMA, MVT::f32, Legal);
     setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
     setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
   }
   if (Subtarget.hasBasicD()) {
     setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
     setOperationAction(ISD::FMA, MVT::f64, Legal);

diff --git a/llvm/test/CodeGen/LoongArch/double-fcmp-strict.ll b/llvm/test/CodeGen/LoongArch/double-fcmp-strict.ll
new file mode 100644
index 000000000000..066f60752e2a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/double-fcmp-strict.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+
+define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}

diff --git a/llvm/test/CodeGen/LoongArch/double-fcmps-strict.ll b/llvm/test/CodeGen/LoongArch/double-fcmps-strict.ll
new file mode 100644
index 000000000000..c8974fb94622
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/double-fcmps-strict.ll
@@ -0,0 +1,482 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+
+define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.seq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.seq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.slt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.slt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.slt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.slt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmps_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}

diff --git a/llvm/test/CodeGen/LoongArch/float-fcmp-strict.ll b/llvm/test/CodeGen/LoongArch/float-fcmp-strict.ll
new file mode 100644
index 000000000000..0459d5019378
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/float-fcmp-strict.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+
+define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}

diff  --git a/llvm/test/CodeGen/LoongArch/float-fcmps-strict.ll b/llvm/test/CodeGen/LoongArch/float-fcmps-strict.ll
new file mode 100644
index 000000000000..cad4d45c147e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/float-fcmps-strict.ll
@@ -0,0 +1,482 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+
+define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.seq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.seq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.slt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.slt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.slt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.slt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmps_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.sun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmps_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.sun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    ret
+  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}


        


More information about the llvm-commits mailing list