[llvm] [Mips] Add instruction selection for strict FP (PR #168870)

Erik Enikeev via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 20 04:47:48 PST 2025


https://github.com/Varnike created https://github.com/llvm/llvm-project/pull/168870

This consists of marking the various strict opcodes as legal, and adjusting instruction selection patterns so that 'op' is 'any_op'. The changes are similar to those in D114946 for AArch64 and #160696 for ARM. Only Mips32/64 FPU instructions are affected.

Added lowering for the STRICT_FP_TO_UINT and STRICT_FP_TO_SINT ops.

>From 1c485a28a81214fac476d09cbded390751d9bb88 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Thu, 20 Nov 2025 15:43:25 +0300
Subject: [PATCH] [Mips] Add instruction selection for strict FP

---
 llvm/lib/Target/Mips/MipsISelLowering.cpp   |  22 +
 llvm/lib/Target/Mips/MipsISelLowering.h     |   1 +
 llvm/lib/Target/Mips/MipsInstrFPU.td        |  71 +-
 llvm/lib/Target/Mips/MipsSEISelLowering.cpp |   6 +
 llvm/test/CodeGen/Mips/fp-intrinsics.ll     | 734 ++++++++++++++++++++
 5 files changed, 798 insertions(+), 36 deletions(-)
 create mode 100644 llvm/test/CodeGen/Mips/fp-intrinsics.ll

diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 2fd73275721b1..3b4c481980e52 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -356,6 +356,8 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
   setOperationAction(ISD::FCOPYSIGN,          MVT::f32,   Custom);
   setOperationAction(ISD::FCOPYSIGN,          MVT::f64,   Custom);
   setOperationAction(ISD::FP_TO_SINT,         MVT::i32,   Custom);
+  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
+  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
 
   if (Subtarget.hasMips32r2() ||
       getTargetMachine().getTargetTriple().isOSLinux())
@@ -395,6 +397,8 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
       setOperationAction(ISD::STORE,              MVT::i64,   Custom);
     }
     setOperationAction(ISD::FP_TO_SINT,         MVT::i64,   Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
     setOperationAction(ISD::SHL_PARTS,          MVT::i64,   Custom);
     setOperationAction(ISD::SRA_PARTS,          MVT::i64,   Custom);
     setOperationAction(ISD::SRL_PARTS,          MVT::i64,   Custom);
@@ -433,6 +437,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
   setOperationAction(ISD::FP_TO_UINT,        MVT::i32,   Expand);
   setOperationAction(ISD::FP_TO_UINT,        MVT::i64,   Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,    Expand);
+
   if (Subtarget.hasCnMips()) {
     setOperationAction(ISD::CTPOP,           MVT::i32,   Legal);
     setOperationAction(ISD::CTPOP,           MVT::i64,   Legal);
@@ -1354,6 +1359,9 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case ISD::LOAD:               return lowerLOAD(Op, DAG);
   case ISD::STORE:              return lowerSTORE(Op, DAG);
   case ISD::EH_DWARF_CFA:       return lowerEH_DWARF_CFA(Op, DAG);
+  case ISD::STRICT_FP_TO_SINT:
+  case ISD::STRICT_FP_TO_UINT:
+    return lowerSTRICT_FP_TO_INT(Op, DAG);
   case ISD::FP_TO_SINT:         return lowerFP_TO_SINT(Op, DAG);
   case ISD::READCYCLECOUNTER:
     return lowerREADCYCLECOUNTER(Op, DAG);
@@ -3011,6 +3019,20 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
   return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
 }
 
+SDValue MipsTargetLowering::lowerSTRICT_FP_TO_INT(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  assert(Op->isStrictFPOpcode());
+  SDValue SrcVal = Op.getOperand(1);
+  SDLoc Loc(Op);
+
+  SDValue Result =
+      DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
+                                                           : ISD::FP_TO_UINT,
+                  Loc, Op.getValueType(), SrcVal);
+
+  return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
+}
+
 //===----------------------------------------------------------------------===//
 //                      Calling Convention Implementation
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 25a0bf9b797d5..fb6b671ad6035 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -590,6 +590,7 @@ class TargetRegisterClass;
                                  bool IsSRA) const;
     SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerSTRICT_FP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
 
     /// isEligibleForTailCallOptimization - Check whether the call is eligible
diff --git a/llvm/lib/Target/Mips/MipsInstrFPU.td b/llvm/lib/Target/Mips/MipsInstrFPU.td
index 4ca329d214981..c065ba6c9632e 100644
--- a/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -203,14 +203,14 @@ class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
                SDPatternOperator OpNode = null_frag> :
   InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
          !strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
-         [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin,
+         [(set RC:$fd, (OpNode (any_fmul RC:$fs, RC:$ft), RC:$fr))], Itin,
          FrmFR, opstr>, HARDFLOAT;
 
 class NMADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
                 SDPatternOperator OpNode = null_frag> :
   InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
          !strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
-         [(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))],
+         [(set RC:$fd, (any_fsub fpimm0, (OpNode (any_fmul RC:$fs, RC:$ft), RC:$fr)))],
          Itin, FrmFR, opstr>, HARDFLOAT;
 
 class LWXC1_FT<string opstr, RegisterOperand DRC,
@@ -280,7 +280,6 @@ class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC,
   let hasFCCRegOperand = 1;
 }
 
-
 multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt,
                     InstrItinClass itin> {
   def C_F_#NAME : MMRel, C_COND_FT<"f", TypeStr, RC, itin>,
@@ -539,8 +538,8 @@ let AdditionalPredicates = [NotInMicroMips] in {
 
 let AdditionalPredicates = [NotInMicroMips] in {
   def FSQRT_S : MMRel, StdMMR6Rel, ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd,
-                II_SQRT_S, fsqrt>, ABSS_FM<0x4, 16>, ISA_MIPS2;
-  defm FSQRT : ABSS_M<"sqrt.d", II_SQRT_D, fsqrt>, ABSS_FM<0x4, 17>, ISA_MIPS2;
+                II_SQRT_S, any_fsqrt>, ABSS_FM<0x4, 16>, ISA_MIPS2;
+  defm FSQRT : ABSS_M<"sqrt.d", II_SQRT_D, any_fsqrt>, ABSS_FM<0x4, 17>, ISA_MIPS2;
 }
 
 // The odd-numbered registers are only referenced when doing loads,
@@ -661,58 +660,58 @@ let AdditionalPredicates = [NotInMicroMips],
 
 /// Floating-point Arithmetic
 let AdditionalPredicates = [NotInMicroMips] in {
-  def FADD_S : MMRel, ADDS_FT<"add.s", FGR32Opnd, II_ADD_S, 1, fadd>,
+  def FADD_S : MMRel, ADDS_FT<"add.s", FGR32Opnd, II_ADD_S, 1, any_fadd>,
                ADDS_FM<0x00, 16>, ISA_MIPS1;
-  defm FADD :  ADDS_M<"add.d", II_ADD_D, 1, fadd>, ADDS_FM<0x00, 17>,
+  defm FADD :  ADDS_M<"add.d", II_ADD_D, 1, any_fadd>, ADDS_FM<0x00, 17>,
                ISA_MIPS1;
-  def FDIV_S : MMRel, ADDS_FT<"div.s", FGR32Opnd, II_DIV_S, 0, fdiv>,
+  def FDIV_S : MMRel, ADDS_FT<"div.s", FGR32Opnd, II_DIV_S, 0, any_fdiv>,
                ADDS_FM<0x03, 16>, ISA_MIPS1;
-  defm FDIV :  ADDS_M<"div.d", II_DIV_D, 0, fdiv>, ADDS_FM<0x03, 17>,
+  defm FDIV :  ADDS_M<"div.d", II_DIV_D, 0, any_fdiv>, ADDS_FM<0x03, 17>,
                ISA_MIPS1;
-  def FMUL_S : MMRel, ADDS_FT<"mul.s", FGR32Opnd, II_MUL_S, 1, fmul>,
+  def FMUL_S : MMRel, ADDS_FT<"mul.s", FGR32Opnd, II_MUL_S, 1, any_fmul>,
                ADDS_FM<0x02, 16>, ISA_MIPS1;
-  defm FMUL :  ADDS_M<"mul.d", II_MUL_D, 1, fmul>, ADDS_FM<0x02, 17>,
+  defm FMUL :  ADDS_M<"mul.d", II_MUL_D, 1, any_fmul>, ADDS_FM<0x02, 17>,
                ISA_MIPS1;
-  def FSUB_S : MMRel, ADDS_FT<"sub.s", FGR32Opnd, II_SUB_S, 0, fsub>,
+  def FSUB_S : MMRel, ADDS_FT<"sub.s", FGR32Opnd, II_SUB_S, 0, any_fsub>,
                ADDS_FM<0x01, 16>, ISA_MIPS1;
-  defm FSUB :  ADDS_M<"sub.d", II_SUB_D, 0, fsub>, ADDS_FM<0x01, 17>,
+  defm FSUB :  ADDS_M<"sub.d", II_SUB_D, 0, any_fsub>, ADDS_FM<0x01, 17>,
                ISA_MIPS1;
 }
 
 let AdditionalPredicates = [NotInMicroMips, HasMadd4] in {
-  def MADD_S : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, fadd>,
+  def MADD_S : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, any_fadd>,
                MADDS_FM<4, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
-  def MSUB_S : MMRel, MADDS_FT<"msub.s", FGR32Opnd, II_MSUB_S, fsub>,
+  def MSUB_S : MMRel, MADDS_FT<"msub.s", FGR32Opnd, II_MSUB_S, any_fsub>,
                MADDS_FM<5, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
 
-  def MADD_D32 : MMRel, MADDS_FT<"madd.d", AFGR64Opnd, II_MADD_D, fadd>,
+  def MADD_D32 : MMRel, MADDS_FT<"madd.d", AFGR64Opnd, II_MADD_D, any_fadd>,
                  MADDS_FM<4, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
-  def MSUB_D32 : MMRel, MADDS_FT<"msub.d", AFGR64Opnd, II_MSUB_D, fsub>,
+  def MSUB_D32 : MMRel, MADDS_FT<"msub.d", AFGR64Opnd, II_MSUB_D, any_fsub>,
                  MADDS_FM<5, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
 
   let DecoderNamespace = "MipsFP64" in {
-    def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, II_MADD_D, fadd>,
+    def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, II_MADD_D, any_fadd>,
                    MADDS_FM<4, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
-    def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, II_MSUB_D, fsub>,
+    def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, II_MSUB_D, any_fsub>,
                    MADDS_FM<5, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
   }
 }
 
 let AdditionalPredicates = [NoNaNsFPMath, HasMadd4, NotInMicroMips] in {
-  def NMADD_S : MMRel, NMADDS_FT<"nmadd.s", FGR32Opnd, II_NMADD_S, fadd>,
+  def NMADD_S : MMRel, NMADDS_FT<"nmadd.s", FGR32Opnd, II_NMADD_S, any_fadd>,
                 MADDS_FM<6, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
-  def NMSUB_S : MMRel, NMADDS_FT<"nmsub.s", FGR32Opnd, II_NMSUB_S, fsub>,
+  def NMSUB_S : MMRel, NMADDS_FT<"nmsub.s", FGR32Opnd, II_NMSUB_S, any_fsub>,
                 MADDS_FM<7, 0>, INSN_MIPS4_32R2_NOT_32R6_64R6;
 
-  def NMADD_D32 : MMRel, NMADDS_FT<"nmadd.d", AFGR64Opnd, II_NMADD_D, fadd>,
+  def NMADD_D32 : MMRel, NMADDS_FT<"nmadd.d", AFGR64Opnd, II_NMADD_D, any_fadd>,
                   MADDS_FM<6, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
-  def NMSUB_D32 : MMRel, NMADDS_FT<"nmsub.d", AFGR64Opnd, II_NMSUB_D, fsub>,
+  def NMSUB_D32 : MMRel, NMADDS_FT<"nmsub.d", AFGR64Opnd, II_NMSUB_D, any_fsub>,
                   MADDS_FM<7, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_32;
 
   let DecoderNamespace = "MipsFP64" in {
-    def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, II_NMADD_D, fadd>,
+    def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, II_NMADD_D, any_fadd>,
                     MADDS_FM<6, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
-    def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, II_NMSUB_D, fsub>,
+    def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, II_NMSUB_D, any_fsub>,
                     MADDS_FM<7, 1>, INSN_MIPS4_32R2_NOT_32R6_64R6, FGR_64;
   }
 }
@@ -935,7 +934,7 @@ let AdditionalPredicates = [NotInMicroMips] in {
 def : MipsPat<(f32 fpimm0), (MTC1 ZERO)>, ISA_MIPS1;
 def : MipsPat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>, ISA_MIPS1;
 
-def : MipsPat<(f32 (sint_to_fp GPR32Opnd:$src)),
+def : MipsPat<(f32 (any_sint_to_fp GPR32Opnd:$src)),
               (PseudoCVT_S_W GPR32Opnd:$src)>;
 def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
               (TRUNC_W_S FGR32Opnd:$src)>, ISA_MIPS1;
@@ -943,14 +942,14 @@ def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
 def : MipsPat<(MipsMTC1_D64 GPR32Opnd:$src),
               (MTC1_D64 GPR32Opnd:$src)>, ISA_MIPS1, FGR_64;
 
-def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+def : MipsPat<(f64 (any_sint_to_fp GPR32Opnd:$src)),
               (PseudoCVT_D32_W GPR32Opnd:$src)>, FGR_32;
 let AdditionalPredicates = [NotInMicroMips] in {
   def : MipsPat<(MipsTruncIntFP AFGR64Opnd:$src),
                 (TRUNC_W_D32 AFGR64Opnd:$src)>, ISA_MIPS2, FGR_32;
-  def : MipsPat<(f32 (fpround AFGR64Opnd:$src)),
+  def : MipsPat<(f32 (any_fpround AFGR64Opnd:$src)),
                 (CVT_S_D32 AFGR64Opnd:$src)>, ISA_MIPS1, FGR_32;
-  def : MipsPat<(f64 (fpextend FGR32Opnd:$src)),
+  def : MipsPat<(f64 (any_fpextend FGR32Opnd:$src)),
                 (CVT_D32_S FGR32Opnd:$src)>, ISA_MIPS1, FGR_32;
 }
 
@@ -958,11 +957,11 @@ def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>, ISA_MIPS3, GPR_64, FGR_64;
 def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>, ISA_MIPS3, GPR_64,
       FGR_64;
 
-def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+def : MipsPat<(f64 (any_sint_to_fp GPR32Opnd:$src)),
               (PseudoCVT_D64_W GPR32Opnd:$src)>, FGR_64;
-def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)),
+def : MipsPat<(f32 (any_sint_to_fp GPR64Opnd:$src)),
               (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_lo)>, FGR_64;
-def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)),
+def : MipsPat<(f64 (any_sint_to_fp GPR64Opnd:$src)),
               (PseudoCVT_D64_L GPR64Opnd:$src)>, FGR_64;
 
 def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
@@ -973,17 +972,17 @@ def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
               (TRUNC_L_D64 FGR64Opnd:$src)>, ISA_MIPS2, FGR_64;
 
 let AdditionalPredicates = [NotInMicroMips] in {
-  def : MipsPat<(f32 (fpround FGR64Opnd:$src)),
+  def : MipsPat<(f32 (any_fpround FGR64Opnd:$src)),
                 (CVT_S_D64 FGR64Opnd:$src)>, ISA_MIPS1, FGR_64;
-  def : MipsPat<(f64 (fpextend FGR32Opnd:$src)),
+  def : MipsPat<(f64 (any_fpextend FGR32Opnd:$src)),
                 (CVT_D64_S FGR32Opnd:$src)>, ISA_MIPS1, FGR_64;
 }
 
 // To generate NMADD and NMSUB instructions when fneg node is present
 multiclass NMADD_NMSUB<Instruction Nmadd, Instruction Nmsub, RegisterOperand RC> {
-  def : MipsPat<(fneg (fadd (fmul RC:$fs, RC:$ft), RC:$fr)),
+  def : MipsPat<(fneg (any_fadd (any_fmul RC:$fs, RC:$ft), RC:$fr)),
                 (Nmadd RC:$fr, RC:$fs, RC:$ft)>;
-  def : MipsPat<(fneg (fsub (fmul RC:$fs, RC:$ft), RC:$fr)),
+  def : MipsPat<(fneg (any_fsub (any_fmul RC:$fs, RC:$ft), RC:$fr)),
                 (Nmsub RC:$fr, RC:$fs, RC:$ft)>;
 }
 
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 19917f3650bb5..e91337bdbfbc2 100644
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -210,6 +210,12 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
       else
         addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
     }
+
+    for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+                    ISD::STRICT_FDIV, ISD::STRICT_FSQRT}) {
+      setOperationAction(Op, MVT::f32, Legal);
+      setOperationAction(Op, MVT::f64, Legal);
+    }
   }
 
   // Targets with 64bits integer registers, but no 64bit floating point register
diff --git a/llvm/test/CodeGen/Mips/fp-intrinsics.ll b/llvm/test/CodeGen/Mips/fp-intrinsics.ll
new file mode 100644
index 0000000000000..66f966c3e4bf6
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/fp-intrinsics.ll
@@ -0,0 +1,734 @@
+; RUN: llc -mtriple=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-R2
+; RUN: llc -mtriple=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-R6
+; RUN: llc -mtriple=mips -mcpu=mips32r2 -mattr=+fp64,+fpxx -o - %s | FileCheck %s -check-prefixes=CHECK,CHECK-R2
+; RUN: llc -mtriple=mips -mcpu=mips64r2 -o - %s | FileCheck %s -check-prefixes=CHECK,CHECK-R2
+
+; Single-precision intrinsics
+
+define float @add_f32(float %x, float %y) #0 {
+; CHECK-LABEL: add_f32:
+; CHECK: add.s
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @sub_f32(float %x, float %y) #0 {
+; CHECK-LABEL: sub_f32:
+; CHECK: sub.s
+  %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @mul_f32(float %x, float %y) #0 {
+; CHECK-LABEL: mul_f32:
+; CHECK: mul.s
+  %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @div_f32(float %x, float %y) #0 {
+; CHECK-LABEL: div_f32:
+; CHECK: div.s
+  %val = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @frem_f32(float %x, float %y) #0 {
+; CHECK-LABEL: frem_f32:
+; CHECK: jal fmodf
+  %val = call float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @fma_f32(float %x, float %y, float %z) #0 {
+; CHECK-LABEL: fma_f32:
+; CHECK: jal fmaf
+  %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define i32 @fptosi_f32(float %x) #0 {
+; CHECK-LABEL: fptosi_f32:
+; CHECK: trunc.w.s
+  %val = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @fptoui_f32(float %x) #0 {
+; CHECK-LABEL: fptoui_f32:
+; CHECK: trunc.w.s
+; CHECK: trunc.w.s
+  %val = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define float @sqrt_f32(float %x) #0 {
+; CHECK-LABEL: sqrt_f32:
+; CHECK: sqrt.s
+  %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @powi_f32(float %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f32:
+; CHECK: jal __powisf2
+  %val = call float @llvm.experimental.constrained.powi.f32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @sin_f32(float %x) #0 {
+; CHECK-LABEL: sin_f32:
+; CHECK: jal sinf
+  %val = call float @llvm.experimental.constrained.sin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @cos_f32(float %x) #0 {
+; CHECK-LABEL: cos_f32:
+; CHECK: jal cosf
+  %val = call float @llvm.experimental.constrained.cos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @tan_f32(float %x) #0 {
+; CHECK-LABEL: tan_f32:
+; CHECK: jal tanf
+  %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @acos_f32(float %x, float %y) #0 {
+; CHECK-LABEL: acos_f32:
+; CHECK: jal acosf
+  %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @asin_f32(float %x, float %y) #0 {
+; CHECK-LABEL: asin_f32:
+; CHECK: jal asinf
+  %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @atan_f32(float %x, float %y) #0 {
+; CHECK-LABEL: atan_f32:
+; CHECK: jal atanf
+  %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @cosh_f32(float %x, float %y) #0 {
+; CHECK-LABEL: cosh_f32:
+; CHECK: jal coshf
+  %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @sinh_f32(float %x, float %y) #0 {
+; CHECK-LABEL: sinh_f32:
+; CHECK: jal sinhf
+  %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @tanh_f32(float %x, float %y) #0 {
+; CHECK-LABEL: tanh_f32:
+; CHECK: jal tanhf
+  %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @fmuladd_f32(float %x, float %y, float %z) #0 {
+; CHECK-LABEL: fmuladd_f32:
+; CHECK-R2: madd.s
+; CHECK-R6: mul.s
+; CHECK-R6: add.s
+  %val = call float @llvm.experimental.constrained.fmuladd.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @ldexp_f32(float %x, i32 %y) #0 {
+; CHECK-LABEL: ldexp_f32:
+; CHECK: jal ldexpf
+  %val = call float @llvm.experimental.constrained.ldexp.f32.i32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @roundeven_f32(float %x) #0 {
+; CHECK-LABEL: roundeven_f32:
+; CHECK: jal roundevenf
+  %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @uitofp_f32_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f32_i32:
+; CHECK: ldc1
+; CHECK: ldc1
+; CHECK: cvt.s.d
+  %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @atan2_f32(float %x, float %y) #0 {
+; CHECK-LABEL: atan2_f32:
+; CHECK: jal atan2f
+  %val = call float @llvm.experimental.constrained.atan2.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @pow_f32(float %x, float %y) #0 {
+; CHECK-LABEL: pow_f32:
+; CHECK: jal powf
+  %val = call float @llvm.experimental.constrained.pow.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @log_f32(float %x) #0 {
+; CHECK-LABEL: log_f32:
+; CHECK: jal logf
+  %val = call float @llvm.experimental.constrained.log.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @log10_f32(float %x) #0 {
+; CHECK-LABEL: log10_f32:
+; CHECK: jal log10f
+  %val = call float @llvm.experimental.constrained.log10.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @log2_f32(float %x) #0 {
+; CHECK-LABEL: log2_f32:
+; CHECK: jal log2f
+  %val = call float @llvm.experimental.constrained.log2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @exp_f32(float %x) #0 {
+; CHECK-LABEL: exp_f32:
+; CHECK: jal expf
+  %val = call float @llvm.experimental.constrained.exp.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @exp2_f32(float %x) #0 {
+; CHECK-LABEL: exp2_f32:
+; CHECK: jal exp2f
+  %val = call float @llvm.experimental.constrained.exp2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @rint_f32(float %x) #0 {
+; CHECK-LABEL: rint_f32:
+; CHECK: jal rintf
+  %val = call float @llvm.experimental.constrained.rint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @nearbyint_f32(float %x) #0 {
+; CHECK-LABEL: nearbyint_f32:
+; CHECK: jal nearbyintf
+  %val = call float @llvm.experimental.constrained.nearbyint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define i32 @lrint_f32(float %x) #0 {
+; CHECK-LABEL: lrint_f32:
+; CHECK: jal lrintf
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @llrint_f32(float %x) #0 {
+; CHECK-LABEL: llrint_f32:
+; CHECK: jal llrintf
+  %val = call i32 @llvm.experimental.constrained.llrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define float @maxnum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: maxnum_f32:
+; CHECK-R2: jal fmaxf
+; CHECK-R6: max.s
+  %val = call float @llvm.experimental.constrained.maxnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @minnum_f32(float %x, float %y) #0 {
+; CHECK-LABEL: minnum_f32:
+; CHECK-R2: jal fminf
+; CHECK-R6: min.s
+  %val = call float @llvm.experimental.constrained.minnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @ceil_f32(float %x) #0 {
+; CHECK-LABEL: ceil_f32:
+; CHECK: jal ceilf
+  %val = call float @llvm.experimental.constrained.ceil.f32(float %x, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @floor_f32(float %x) #0 {
+; CHECK-LABEL: floor_f32:
+; CHECK: jal floorf
+  %val = call float @llvm.experimental.constrained.floor.f32(float %x, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define i32 @lround_f32(float %x) #0 {
+; CHECK-LABEL: lround_f32:
+; CHECK: jal lroundf
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @llround_f32(float %x) #0 {
+; CHECK-LABEL: llround_f32:
+; CHECK: jal llroundf
+  %val = call i32 @llvm.experimental.constrained.llround.i32.f32(float %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define float @round_f32(float %x) #0 {
+; CHECK-LABEL: round_f32:
+; CHECK: jal roundf
+  %val = call float @llvm.experimental.constrained.round.f32(float %x, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define float @trunc_f32(float %x) #0 {
+; CHECK-LABEL: trunc_f32:
+; CHECK: jal truncf
+  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+; Double-precision intrinsics
+
+define double @add_f64(double %x, double %y) #0 {
+; CHECK-LABEL: add_f64:
+; CHECK: add.d
+  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @sub_f64(double %x, double %y) #0 {
+; CHECK-LABEL: sub_f64:
+; CHECK: sub.d
+  %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @mul_f64(double %x, double %y) #0 {
+; CHECK-LABEL: mul_f64:
+; CHECK: mul.d
+  %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @div_f64(double %x, double %y) #0 {
+; CHECK-LABEL: div_f64:
+; CHECK: div.d
+  %val = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @frem_f64(double %x, double %y) #0 {
+; CHECK-LABEL: frem_f64:
+; CHECK: jal fmod
+  %val = call double @llvm.experimental.constrained.frem.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @fma_f64(double %x, double %y, double %z) #0 {
+; CHECK-LABEL: fma_f64:
+; CHECK: jal fma
+  %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define i32 @fptosi_f64(double %x) #0 {
+; CHECK-LABEL: fptosi_f64:
+; CHECK: trunc.w.d
+  %val = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @fptoui_f64(double %x) #0 {
+; CHECK-LABEL: fptoui_f64:
+; CHECK: trunc.w.d
+; CHECK: trunc.w.d
+  %val = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define double @sqrt_f64(double %x) #0 {
+; CHECK-LABEL: sqrt_f64:
+; CHECK: sqrt.d
+  %val = call double @llvm.experimental.constrained.sqrt.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @powi_f64(double %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f64:
+; CHECK: jal __powidf2
+  %val = call double @llvm.experimental.constrained.powi.f64(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @sin_f64(double %x) #0 {
+; CHECK-LABEL: sin_f64:
+; CHECK: jal sin
+  %val = call double @llvm.experimental.constrained.sin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @cos_f64(double %x) #0 {
+; CHECK-LABEL: cos_f64:
+; CHECK: jal cos
+  %val = call double @llvm.experimental.constrained.cos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @tan_f64(double %x) #0 {
+; CHECK-LABEL: tan_f64:
+; CHECK: jal tan
+  %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @acos_f64(double %x, double %y) #0 {
+; CHECK-LABEL: acos_f64:
+; CHECK: jal acos
+  %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @asin_f64(double %x, double %y) #0 {
+; CHECK-LABEL: asin_f64:
+; CHECK: jal asin
+  %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @atan_f64(double %x, double %y) #0 {
+; CHECK-LABEL: atan_f64:
+; CHECK: jal atan
+  %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @cosh_f64(double %x, double %y) #0 {
+; CHECK-LABEL: cosh_f64:
+; CHECK: jal cosh
+  %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @sinh_f64(double %x, double %y) #0 {
+; CHECK-LABEL: sinh_f64:
+; CHECK: jal sinh
+  %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @tanh_f64(double %x, double %y) #0 {
+; CHECK-LABEL: tanh_f64:
+; CHECK: jal tanh
+  %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @fmuladd_f64(double %x, double %y, double %z) #0 {
+; CHECK-LABEL: fmuladd_f64:
+; CHECK-R2: madd.d
+; CHECK-R6: mul.d
+; CHECK-R6: add.d
+  %val = call double @llvm.experimental.constrained.fmuladd.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @ldexp_f64(double %x, i32 %y) #0 {
+; CHECK-LABEL: ldexp_f64:
+; CHECK: jal ldexp
+  %val = call double @llvm.experimental.constrained.ldexp.f64.i32(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @roundeven_f64(double %x) #0 {
+; CHECK-LABEL: roundeven_f64:
+; CHECK: jal roundeven
+  %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @uitofp_f64_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f64_i32:
+; CHECK: ldc1
+; CHECK: ldc1
+  %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @atan2_f64(double %x, double %y) #0 {
+; CHECK-LABEL: atan2_f64:
+; CHECK: jal atan2
+  %val = call double @llvm.experimental.constrained.atan2.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @pow_f64(double %x, double %y) #0 {
+; CHECK-LABEL: pow_f64:
+; CHECK: jal pow
+  %val = call double @llvm.experimental.constrained.pow.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @log_f64(double %x) #0 {
+; CHECK-LABEL: log_f64:
+; CHECK: jal log
+  %val = call double @llvm.experimental.constrained.log.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @log10_f64(double %x) #0 {
+; CHECK-LABEL: log10_f64:
+; CHECK: jal log10
+  %val = call double @llvm.experimental.constrained.log10.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @log2_f64(double %x) #0 {
+; CHECK-LABEL: log2_f64:
+; CHECK: jal log2
+  %val = call double @llvm.experimental.constrained.log2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @exp_f64(double %x) #0 {
+; CHECK-LABEL: exp_f64:
+; CHECK: jal exp
+  %val = call double @llvm.experimental.constrained.exp.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @exp2_f64(double %x) #0 {
+; CHECK-LABEL: exp2_f64:
+; CHECK: jal exp2
+  %val = call double @llvm.experimental.constrained.exp2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @rint_f64(double %x) #0 {
+; CHECK-LABEL: rint_f64:
+; CHECK: jal rint
+  %val = call double @llvm.experimental.constrained.rint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @nearbyint_f64(double %x) #0 {
+; CHECK-LABEL: nearbyint_f64:
+; CHECK: jal nearbyint
+  %val = call double @llvm.experimental.constrained.nearbyint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define i32 @lrint_f64(double %x) #0 {
+; CHECK-LABEL: lrint_f64:
+; CHECK: jal lrint
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @llrint_f64(double %x) #0 {
+; CHECK-LABEL: llrint_f64:
+; CHECK: jal llrint
+  %val = call i32 @llvm.experimental.constrained.llrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define double @maxnum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: maxnum_f64:
+; CHECK-R2: jal fmax
+; CHECK-R6: max.d
+  %val = call double @llvm.experimental.constrained.maxnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @minnum_f64(double %x, double %y) #0 {
+; CHECK-LABEL: minnum_f64:
+; CHECK-R2: jal fmin
+; CHECK-R6: min.d
+  %val = call double @llvm.experimental.constrained.minnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @ceil_f64(double %x) #0 {
+; CHECK-LABEL: ceil_f64:
+; CHECK: jal ceil
+  %val = call double @llvm.experimental.constrained.ceil.f64(double %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @floor_f64(double %x) #0 {
+; CHECK-LABEL: floor_f64:
+; CHECK: jal floor
+  %val = call double @llvm.experimental.constrained.floor.f64(double %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define i32 @lround_f64(double %x) #0 {
+; CHECK-LABEL: lround_f64:
+; CHECK: jal lround
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define i32 @llround_f64(double %x) #0 {
+; CHECK-LABEL: llround_f64:
+; CHECK: jal llround
+  %val = call i32 @llvm.experimental.constrained.llround.i32.f64(double %x, metadata !"fpexcept.strict") #0
+  ret i32 %val
+}
+
+define double @round_f64(double %x) #0 {
+; CHECK-LABEL: round_f64:
+; CHECK: jal round
+  %val = call double @llvm.experimental.constrained.round.f64(double %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define double @trunc_f64(double %x) #0 {
+; CHECK-LABEL: trunc_f64:
+; CHECK: jal trunc
+  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define float @fptrunc_f32(double %x) #0 {
+; CHECK-LABEL: fptrunc_f32:
+; CHECK: cvt.s.d
+  %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define double @fpext_f32(float %x) #0 {
+; CHECK-LABEL: fpext_f32:
+; CHECK: cvt.d.s
+  %val = call double @llvm.experimental.constrained.fpext.f64.f32(float %x, metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+define float @sitofp_f32_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f32_i32:
+; CHECK: ldc1
+; CHECK: ldc1
+; CHECK: cvt.s.d
+  %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret float %val
+}
+
+define double @sitofp_f64_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f64_i32:
+; CHECK: ldc1
+; CHECK: ldc1
+  %val = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret double %val
+}
+
+
+attributes #0 = { strictfp }
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
+declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
+declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.acos.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.asin.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.atan.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.cosh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.sinh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tanh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata)
+declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.llrint.i32.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
+declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
+declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
+declare float @llvm.experimental.constrained.floor.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.llround.i32.f32(float, metadata)
+declare float @llvm.experimental.constrained.round.f32(float, metadata)
+declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
+
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata)
+declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.llrint.i32.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
+declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
+declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
+declare double @llvm.experimental.constrained.floor.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.llround.i32.f64(double, metadata)
+declare double @llvm.experimental.constrained.round.f64(double, metadata)
+declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
+
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)



More information about the llvm-commits mailing list