[llvm] [ARM] Add instruction selection for strict FP (PR #160696)
Erik Enikeev via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 25 05:24:20 PDT 2025
https://github.com/Varnike created https://github.com/llvm/llvm-project/pull/160696
This consists of marking the various strict opcodes as legal and adjusting the instruction selection patterns so that each 'op' becomes the corresponding 'any_op'. The changes are similar to those made in D114946 for AArch64.
Custom lowering and promotion are used for some FP16 strict ops so that they are handled correctly.
This PR is part of the work on adding strict FP support to the ARM backend, which was previously discussed in #137101.
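As an illustration (this snippet is not taken from the patch's tests, and the function name is hypothetical), a constrained fadd such as the one below is expected to select directly to vadd.f32 on a VFP2 target, because STRICT_FADD is now marked legal and the VADDS pattern uses any_fadd, which matches both the fadd and strict_fadd DAG nodes:

  ; Hypothetical strictfp function, for illustration only.
  define float @strict_fadd_f32(float %x, float %y) #0 {
    %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
    ret float %val
  }
  declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
  attributes #0 = { strictfp }

(The exact assembly depends on the target features; without VFP2 such a call would still be lowered to a __aeabi_fadd libcall, as the -NOSP check lines in the tests suggest.)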
>From f785ffc64b3a904df4abc4593df3a0420f00e8df Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Fri, 1 Aug 2025 13:49:51 -0400
Subject: [PATCH] [ARM] Add instruction selection for strict FP
This consists of marking the various strict opcodes as legal and
adjusting instruction selection patterns so that each 'op' becomes the
corresponding 'any_op'. The changes are similar to those in D114946
for AArch64.
Custom lowering and promotion are used for some FP16 strict ops so
that they are handled correctly.
---
llvm/lib/Target/ARM/ARMISelLowering.cpp | 93 ++-
llvm/lib/Target/ARM/ARMInstrInfo.td | 6 +-
llvm/lib/Target/ARM/ARMInstrVFP.td | 175 ++---
llvm/test/CodeGen/ARM/fp-intrinsics.ll | 168 +++++
llvm/test/CodeGen/ARM/fp16-fullfp16.ll | 896 ++++++++++++++++++++++++
5 files changed, 1225 insertions(+), 113 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9052cbfa89deb..17dae0b6d11f1 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -602,10 +602,20 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
- if (!Subtarget->hasVFP2Base())
+ if (!Subtarget->hasVFP2Base()) {
setAllExpand(MVT::f32);
- if (!Subtarget->hasFP64())
+ } else {
+ for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+ ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
+ setOperationAction(Op, MVT::f32, Legal);
+ }
+ if (!Subtarget->hasFP64()) {
setAllExpand(MVT::f64);
+ } else {
+ for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+ ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
+ setOperationAction(Op, MVT::f64, Legal);
+ }
}
if (Subtarget->hasFullFP16()) {
@@ -1338,29 +1348,42 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
}
// FP16 often need to be promoted to call lib functions
+ // clang-format off
if (Subtarget->hasFullFP16()) {
- setOperationAction(ISD::FREM, MVT::f16, Promote);
+ for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
+ ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
+ ISD::FSINCOSPI, ISD::FMODF, ISD::FACOS,
+ ISD::FASIN, ISD::FATAN, ISD::FATAN2,
+ ISD::FCOSH, ISD::FSINH, ISD::FTANH,
+ ISD::FTAN, ISD::FEXP, ISD::FEXP2,
+ ISD::FEXP10, ISD::FLOG, ISD::FLOG2,
+ ISD::FLOG10, ISD::STRICT_FREM, ISD::STRICT_FPOW,
+ ISD::STRICT_FPOWI, ISD::STRICT_FCOS, ISD::STRICT_FSIN,
+ ISD::STRICT_FACOS, ISD::STRICT_FASIN, ISD::STRICT_FATAN,
+ ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, ISD::STRICT_FSINH,
+ ISD::STRICT_FTANH, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
+ ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10,
+ ISD::STRICT_FTAN}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ }
+
+ // Round-to-integer operations need custom lowering for fp16, as Promote
+ // doesn't work because the result type is integer.
+ for (auto Op : {ISD::LROUND, ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
+ ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
+ ISD::STRICT_LLRINT})
+ setOperationAction(Op, MVT::f16, Custom);
+
+ for (auto Op : {ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
+ ISD::FNEARBYINT, ISD::FRINT, ISD::FFLOOR,
+ ISD::FCEIL, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN,
+ ISD::STRICT_FTRUNC, ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT,
+ ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL}) {
+ setOperationAction(Op, MVT::f16, Legal);
+ }
+ // clang-format on
+
setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
- setOperationAction(ISD::FSIN, MVT::f16, Promote);
- setOperationAction(ISD::FCOS, MVT::f16, Promote);
- setOperationAction(ISD::FTAN, MVT::f16, Promote);
- setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
- setOperationAction(ISD::FPOWI, MVT::f16, Promote);
- setOperationAction(ISD::FPOW, MVT::f16, Promote);
- setOperationAction(ISD::FEXP, MVT::f16, Promote);
- setOperationAction(ISD::FEXP2, MVT::f16, Promote);
- setOperationAction(ISD::FEXP10, MVT::f16, Promote);
- setOperationAction(ISD::FLOG, MVT::f16, Promote);
- setOperationAction(ISD::FLOG10, MVT::f16, Promote);
- setOperationAction(ISD::FLOG2, MVT::f16, Promote);
-
- setOperationAction(ISD::FROUND, MVT::f16, Legal);
- setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal);
- setOperationAction(ISD::FTRUNC, MVT::f16, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::f16, Legal);
- setOperationAction(ISD::FRINT, MVT::f16, Legal);
- setOperationAction(ISD::FFLOOR, MVT::f16, Legal);
- setOperationAction(ISD::FCEIL, MVT::f16, Legal);
}
if (Subtarget->hasNEON()) {
@@ -10721,6 +10744,30 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerCMP(Op, DAG);
case ISD::ABS:
return LowerABS(Op, DAG);
+ case ISD::LRINT:
+ case ISD::LLRINT:
+ case ISD::LROUND:
+ case ISD::LLROUND: {
+ assert((Op.getOperand(0).getValueType() == MVT::f16 ||
+ Op.getOperand(0).getValueType() == MVT::bf16) &&
+ "Expected custom lowering of rounding operations only for f16");
+ SDLoc DL(Op);
+ SDValue Ext = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
+ return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), Ext);
+ }
+ case ISD::STRICT_LROUND:
+ case ISD::STRICT_LLROUND:
+ case ISD::STRICT_LRINT:
+ case ISD::STRICT_LLRINT: {
+ assert((Op.getOperand(1).getValueType() == MVT::f16 ||
+ Op.getOperand(1).getValueType() == MVT::bf16) &&
+ "Expected custom lowering of rounding operations only for f16");
+ SDLoc DL(Op);
+ SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
+ {Op.getOperand(0), Op.getOperand(1)});
+ return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
+ {Ext.getValue(1), Ext.getValue(0)});
+ }
}
}
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index 282ff534fc112..85c2fb1a914dd 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -473,15 +473,15 @@ def xor_su : PatFrag<(ops node:$lhs, node:$rhs), (xor node:$lhs, node:$rhs)>;
// An 'fmul' node with a single use.
let HasOneUse = 1 in
-def fmul_su : PatFrag<(ops node:$lhs, node:$rhs), (fmul node:$lhs, node:$rhs)>;
+def fmul_su : PatFrag<(ops node:$lhs, node:$rhs), (any_fmul node:$lhs, node:$rhs)>;
// An 'fadd' node which checks for single non-hazardous use.
-def fadd_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fadd node:$lhs, node:$rhs),[{
+def fadd_mlx : PatFrag<(ops node:$lhs, node:$rhs),(any_fadd node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
// An 'fsub' node which checks for single non-hazardous use.
-def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
+def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(any_fsub node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index 31650e0137beb..4b15b419a8d00 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -439,14 +439,14 @@ let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD : ADbI<0b11100, 0b11, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
- [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
+ [(set DPR:$Dd, (any_fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
Sched<[WriteFPALU64]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS : ASbIn<0b11100, 0b11, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
- [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
+ [(set SPR:$Sd, (any_fadd SPR:$Sn, SPR:$Sm))]>,
Sched<[WriteFPALU32]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
@@ -457,21 +457,21 @@ let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH : AHbI<0b11100, 0b11, 0, 0,
(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
- [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
+ [(set (f16 HPR:$Sd), (any_fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
Sched<[WriteFPALU32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
- [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
+ [(set DPR:$Dd, (any_fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
Sched<[WriteFPALU64]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
- [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
+ [(set SPR:$Sd, (any_fsub SPR:$Sn, SPR:$Sm))]>,
Sched<[WriteFPALU32]>{
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
@@ -482,42 +482,42 @@ let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH : AHbI<0b11100, 0b11, 1, 0,
(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
- [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
+ [(set (f16 HPR:$Sd), (any_fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
Sched<[WriteFPALU32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
- [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
+ [(set DPR:$Dd, (any_fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
Sched<[WriteFPDIV64]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS : ASbI<0b11101, 0b00, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
- [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
+ [(set SPR:$Sd, (any_fdiv SPR:$Sn, SPR:$Sm))]>,
Sched<[WriteFPDIV32]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH : AHbI<0b11101, 0b00, 0, 0,
(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
- [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
+ [(set (f16 HPR:$Sd), (any_fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
Sched<[WriteFPDIV32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
- [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
+ [(set DPR:$Dd, (any_fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS : ASbIn<0b11100, 0b10, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
- [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
+ [(set SPR:$Sd, (any_fmul SPR:$Sn, SPR:$Sm))]>,
Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
@@ -528,21 +528,21 @@ let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH : AHbI<0b11100, 0b10, 0, 0,
(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
- [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
+ [(set (f16 HPR:$Sd), (any_fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VNMULD : ADbI<0b11100, 0b10, 1, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
- [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
+ [(set DPR:$Dd, (fneg (any_fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VNMULS : ASbI<0b11100, 0b10, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
- [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
+ [(set SPR:$Sd, (fneg (any_fmul SPR:$Sn, SPR:$Sm)))]>,
Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
@@ -553,7 +553,7 @@ let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VNMULH : AHbI<0b11100, 0b10, 1, 0,
(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
- [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
+ [(set (f16 HPR:$Sd), (fneg (any_fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
multiclass vsel_inst<string op, bits<2> opc, int CC> {
@@ -587,7 +587,7 @@ defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;
-multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
+multiclass vmaxmin_inst<string op, bit opc, PatFrags SD> {
let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
isUnpredicable = 1 in {
def H : AHbInp<0b11101, 0b00, opc,
@@ -610,8 +610,8 @@ multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
}
}
-defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
-defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;
+defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, any_fmaxnum>;
+defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, any_fminnum>;
// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
@@ -745,7 +745,7 @@ def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
(outs DPR:$Dd), (ins SPR:$Sm),
IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm", "",
- [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
+ [(set DPR:$Dd, (any_fpextend SPR:$Sm))]>,
Sched<[WriteFPCVT]> {
// Instruction operands.
bits<5> Dd;
@@ -764,7 +764,7 @@ def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm", "",
- [(set SPR:$Sd, (fpround DPR:$Dm))]>,
+ [(set SPR:$Sd, (any_fpround DPR:$Dm))]>,
Sched<[WriteFPCVT]> {
// Instruction operands.
bits<5> Sd;
@@ -794,7 +794,7 @@ def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
Requires<[HasFP16]>,
Sched<[WriteFPCVT]>;
-def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
+def : FP16Pat<(f32 (any_fpextend (f16 HPR:$Sm))),
(VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
(VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
@@ -806,16 +806,16 @@ def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda,
Requires<[HasFP16]>,
Sched<[WriteFPCVT]>;
-def : FP16Pat<(f16 (fpround SPR:$Sm)),
+def : FP16Pat<(f16 (any_fpround SPR:$Sm)),
(COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
(i32 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$a), GPR))>;
-def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
+def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (any_fpround (f32 SPR:$src2))), imm_even:$lane),
(v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
(VCVTBSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
SPR:$src2),
(SSubReg_f16_reg imm:$lane)))>;
-def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
+def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (any_fpround (f32 SPR:$src2))), imm_even:$lane),
(v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
(VCVTBSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
SPR:$src2),
@@ -828,9 +828,9 @@ def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
Requires<[HasFP16]>,
Sched<[WriteFPCVT]>;
-def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
+def : FP16Pat<(f32 (any_fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
(VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
-def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
+def : FP16Pat<(f32 (any_fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
(VCVTTHS (EXTRACT_SUBREG
(v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
(SSubReg_f16_reg imm_odd:$lane)))>;
@@ -842,12 +842,12 @@ def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sda,
Requires<[HasFP16]>,
Sched<[WriteFPCVT]>;
-def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
+def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (any_fpround (f32 SPR:$src2))), imm_odd:$lane),
(v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
(VCVTTSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
SPR:$src2),
(SSubReg_f16_reg imm:$lane)))>;
-def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
+def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (any_fpround (f32 SPR:$src2))), imm_odd:$lane),
(v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
(VCVTTSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
SPR:$src2),
@@ -869,7 +869,7 @@ def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
let hasSideEffects = 0;
}
-def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
+def : FullFP16Pat<(f64 (any_fpextend (f16 HPR:$Sm))),
(VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
@@ -894,7 +894,7 @@ def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
let hasSideEffects = 0;
}
-def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
+def : FullFP16Pat<(f16 (any_fpround DPR:$Dm)),
(COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$Dm), HPR)>,
Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
@@ -1000,41 +1000,41 @@ multiclass vcvt_inst<string opc, bits<2> rm,
let Predicates = [HasFPARMv8] in {
let Predicates = [HasFullFP16] in {
- def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
+ def : Pat<(i32 (any_fp_to_sint (node (f16 HPR:$a)))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
GPR)>;
- def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
+ def : Pat<(i32 (any_fp_to_uint (node (f16 HPR:$a)))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
GPR)>;
}
- def : Pat<(i32 (fp_to_sint (node SPR:$a))),
+ def : Pat<(i32 (any_fp_to_sint (node SPR:$a))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"SS") SPR:$a),
GPR)>;
- def : Pat<(i32 (fp_to_uint (node SPR:$a))),
+ def : Pat<(i32 (any_fp_to_uint (node SPR:$a))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"US") SPR:$a),
GPR)>;
}
let Predicates = [HasFPARMv8, HasDPVFP] in {
- def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
+ def : Pat<(i32 (any_fp_to_sint (node (f64 DPR:$a)))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"SD") DPR:$a),
GPR)>;
- def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
+ def : Pat<(i32 (any_fp_to_uint (node (f64 DPR:$a)))),
(COPY_TO_REGCLASS
(!cast<Instruction>(NAME#"UD") DPR:$a),
GPR)>;
}
}
-defm VCVTA : vcvt_inst<"a", 0b00, fround>;
+defm VCVTA : vcvt_inst<"a", 0b00, any_fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
-defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
-defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
+defm VCVTP : vcvt_inst<"p", 0b10, any_fceil>;
+defm VCVTM : vcvt_inst<"m", 0b11, any_ffloor>;
def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
@@ -1093,9 +1093,9 @@ multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
Requires<[HasFPARMv8,HasDPVFP]>;
}
-defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
-defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
-defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
+defm VRINTZ : vrint_inst_zrx<"z", 0, 1, any_ftrunc>;
+defm VRINTR : vrint_inst_zrx<"r", 0, 0, any_fnearbyint>;
+defm VRINTX : vrint_inst_zrx<"x", 1, 0, any_frint>;
multiclass vrint_inst_anpm<string opc, bits<2> rm,
SDPatternOperator node = null_frag> {
@@ -1135,27 +1135,28 @@ multiclass vrint_inst_anpm<string opc, bits<2> rm,
Requires<[HasFPARMv8,HasDPVFP]>;
}
-defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
-defm VRINTN : vrint_inst_anpm<"n", 0b01, froundeven>;
-defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
-defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
+defm VRINTA : vrint_inst_anpm<"a", 0b00, any_fround>;
+defm VRINTN : vrint_inst_anpm<"n", 0b01, any_froundeven>;
+defm VRINTP : vrint_inst_anpm<"p", 0b10, any_fceil>;
+defm VRINTM : vrint_inst_anpm<"m", 0b11, any_ffloor>;
+
def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", "",
- [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
+ [(set DPR:$Dd, (any_fsqrt (f64 DPR:$Dm)))]>,
Sched<[WriteFPSQRT64]>;
def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm", "",
- [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
+ [(set SPR:$Sd, (any_fsqrt SPR:$Sm))]>,
Sched<[WriteFPSQRT32]>;
def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
(outs HPR:$Sd), (ins HPR:$Sm),
IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
- [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;
+ [(set (f16 HPR:$Sd), (any_fsqrt (f16 HPR:$Sm)))]>;
let hasSideEffects = 0 in {
let isMoveReg = 1 in {
@@ -1495,10 +1496,10 @@ def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
}
let Predicates=[HasVFP2, HasDPVFP] in {
- def : VFPPat<(f64 (sint_to_fp GPR:$a)),
+ def : VFPPat<(f64 (any_sint_to_fp GPR:$a)),
(VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
- def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
+ def : VFPPat<(f64 (any_sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VSITOD (VLDRS addrmode5:$a))>;
}
@@ -1514,10 +1515,10 @@ def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
let D = VFPNeonA8Domain;
}
-def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
+def : VFPNoNEONPat<(f32 (any_sint_to_fp GPR:$a)),
(VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
-def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
+def : VFPNoNEONPat<(f32 (any_sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VSITOS (VLDRS addrmode5:$a))>;
def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
@@ -1529,7 +1530,7 @@ def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
let isUnpredicable = 1;
}
-def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
+def : VFPNoNEONPat<(f16 (any_sint_to_fp GPR:$a)),
(VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
@@ -1541,10 +1542,10 @@ def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
}
let Predicates=[HasVFP2, HasDPVFP] in {
- def : VFPPat<(f64 (uint_to_fp GPR:$a)),
+ def : VFPPat<(f64 (any_uint_to_fp GPR:$a)),
(VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
- def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
+ def : VFPPat<(f64 (any_uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VUITOD (VLDRS addrmode5:$a))>;
}
@@ -1560,10 +1561,10 @@ def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
let D = VFPNeonA8Domain;
}
-def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
+def : VFPNoNEONPat<(f32 (any_uint_to_fp GPR:$a)),
(VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
-def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
+def : VFPNoNEONPat<(f32 (any_uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VUITOS (VLDRS addrmode5:$a))>;
def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
@@ -1575,7 +1576,7 @@ def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
let isUnpredicable = 1;
}
-def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
+def : VFPNoNEONPat<(f16 (any_uint_to_fp GPR:$a)),
(VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
// FP -> Int:
@@ -1649,12 +1650,12 @@ def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
}
let Predicates=[HasVFP2, HasDPVFP] in {
- def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
+ def : VFPPat<(i32 (any_fp_to_sint (f64 DPR:$a))),
(COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
def : VFPPat<(i32 (fp_to_sint_sat (f64 DPR:$a), i32)),
(COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
- def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
+ def : VFPPat<(alignedstore32 (i32 (any_fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
(VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
(VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
@@ -1672,12 +1673,12 @@ def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
let D = VFPNeonA8Domain;
}
-def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
+def : VFPNoNEONPat<(i32 (any_fp_to_sint SPR:$a)),
(COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
def : VFPPat<(i32 (fp_to_sint_sat SPR:$a, i32)),
(COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
-def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
+def : VFPNoNEONPat<(alignedstore32 (i32 (any_fp_to_sint (f32 SPR:$a))),
addrmode5:$ptr),
(VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f32 SPR:$a), i32)),
@@ -1693,7 +1694,7 @@ def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
let isUnpredicable = 1;
}
-def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
+def : VFPNoNEONPat<(i32 (any_fp_to_sint (f16 HPR:$a))),
(COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
(COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
@@ -1707,12 +1708,12 @@ def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
}
let Predicates=[HasVFP2, HasDPVFP] in {
- def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
+ def : VFPPat<(i32 (any_fp_to_uint (f64 DPR:$a))),
(COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
def : VFPPat<(i32 (fp_to_uint_sat (f64 DPR:$a), i32)),
(COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
- def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
+ def : VFPPat<(alignedstore32 (i32 (any_fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
(VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
(VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
@@ -1730,12 +1731,12 @@ def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
let D = VFPNeonA8Domain;
}
-def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
+def : VFPNoNEONPat<(i32 (any_fp_to_uint SPR:$a)),
(COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
def : VFPPat<(i32 (fp_to_uint_sat SPR:$a, i32)),
(COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
-def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
+def : VFPNoNEONPat<(alignedstore32 (i32 (any_fp_to_uint (f32 SPR:$a))),
addrmode5:$ptr),
(VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f32 SPR:$a), i32)),
@@ -1751,7 +1752,7 @@ def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
let isUnpredicable = 1;
}
-def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
+def : VFPNoNEONPat<(i32 (any_fp_to_uint (f16 HPR:$a))),
(COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
(COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
@@ -2279,13 +2280,13 @@ def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
// Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfms z, x, y)
-def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
+def : Pat<(f64 (any_fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
(VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
+def : Pat<(f32 (any_fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
(VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
+def : Pat<(f16 (any_fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
(VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
@@ -2331,13 +2332,13 @@ def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
// Match @llvm.fma.* intrinsics
// (fma (fneg x), y, z) -> (vfms z, x, y)
-def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
+def : Pat<(f64 (any_fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
(VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
+def : Pat<(f32 (any_fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
(VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
+def : Pat<(f16 (any_fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
(VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
@@ -2380,23 +2381,23 @@ def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
// Match @llvm.fma.* intrinsics
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
-def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
+def : Pat<(fneg (any_fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
+def : Pat<(fneg (any_fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))),
+def : Pat<(fneg (any_fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))),
(VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
-def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
+def : Pat<(f64 (any_fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
+def : Pat<(f32 (any_fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
+def : Pat<(f16 (any_fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
(VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
@@ -2438,23 +2439,23 @@ def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
// Match @llvm.fma.* intrinsics
// (fma x, y, (fneg z)) -> (vfnms z, x, y))
-def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
+def : Pat<(f64 (any_fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
+def : Pat<(f32 (any_fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
+def : Pat<(f16 (any_fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
(VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
-def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
+def : Pat<(fneg (f64 (any_fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
(VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
Requires<[HasVFP4,HasDPVFP]>;
-def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
+def : Pat<(fneg (f32 (any_fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
(VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
Requires<[HasVFP4]>;
-def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))),
+def : Pat<(fneg (f16 (any_fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))),
(VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
Requires<[HasFullFP16]>;
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
index 93b6a58a22b6c..0d0b6fd315129 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
@@ -146,6 +146,80 @@ define float @tan_f32(float %x) #0 {
ret float %val
}
+; CHECK-LABEL: acos_f32:
+; CHECK: bl acosf
+define float @acos_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: asin_f32:
+; CHECK: bl asinf
+define float @asin_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: atan_f32:
+; CHECK: bl atanf
+define float @atan_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: cosh_f32:
+; CHECK: bl coshf
+define float @cosh_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: sinh_f32:
+; CHECK: bl sinhf
+define float @sinh_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: tanh_f32:
+; CHECK: bl tanhf
+define float @tanh_f32(float %x, float %y) #0 {
+ %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: fmuladd_f32:
+; CHECK-SP: vfma.f32
+; CHECK-NOSP: bl __aeabi_fmul
+; CHECK-NOSP: bl __aeabi_fadd
+define float @fmuladd_f32(float %x, float %y, float %z) #0 {
+ %val = call float @llvm.experimental.constrained.fmuladd.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: ldexp_f32:
+; CHECK: bl ldexpf
+define float @ldexp_f32(float %x, i32 %y) #0 {
+ %val = call float @llvm.experimental.constrained.ldexp.f32.i32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: roundeven_f32:
+; CHECK-SP-V8: vrintn.f32
+; CHECK-NOSP: bl roundevenf
+define float @roundeven_f32(float %x) #0 {
+ %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+; CHECK-LABEL: uitofp_f32_i32:
+; CHECK-NOSP: bl __aeabi_ui2f
+; FIXME-CHECK-SP: vcvt.f32.f64
+define float @uitofp_f32_i32(i32 %x) #0 {
+ %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
; CHECK-LABEL: atan2_f32:
; CHECK: bl atan2f
define float @atan2_f32(float %x, float %y) #0 {
@@ -617,6 +691,80 @@ define double @tan_f64(double %x) #0 {
ret double %val
}
+; CHECK-LABEL: acos_f64:
+; CHECK: bl acos
+define double @acos_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: asin_f64:
+; CHECK: bl asin
+define double @asin_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: atan_f64:
+; CHECK: bl atan
+define double @atan_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: cosh_f64:
+; CHECK: bl cosh
+define double @cosh_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: sinh_f64:
+; CHECK: bl sinh
+define double @sinh_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: tanh_f64:
+; CHECK: bl tanh
+define double @tanh_f64(double %x, double %y) #0 {
+ %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: fmuladd_f64:
+; CHECK-DP: vfma.f64
+; CHECK-NODP: bl __aeabi_dmul
+; CHECK-NODP: bl __aeabi_dadd
+define double @fmuladd_f64(double %x, double %y, double %z) #0 {
+ %val = call double @llvm.experimental.constrained.fmuladd.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: ldexp_f64:
+; CHECK: bl ldexp
+define double @ldexp_f64(double %x, i32 %y) #0 {
+ %val = call double @llvm.experimental.constrained.ldexp.f64.i32(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: roundeven_f64:
+; CHECK-DP-V8: vrintn.f64
+; CHECK-NODP: bl roundeven
+define double @roundeven_f64(double %x) #0 {
+ %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
+; CHECK-LABEL: uitofp_f64_i32:
+; CHECK-NOSP: bl __aeabi_ui2d
+; FIXME-CHECK-SP: vsub.f64
+define double @uitofp_f64_i32(i32 %x) #0 {
+ %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
; CHECK-LABEL: atan2_f64:
; CHECK: bl atan2
define double @atan2_f64(double %x, double %y) #0 {
@@ -1052,6 +1200,16 @@ declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, meta
declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.acos.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.asin.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.atan.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.cosh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.sinh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tanh.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata)
+declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
@@ -1087,6 +1245,16 @@ declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, me
declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata)
+declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
diff --git a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
index 200b14bae56ed..5e08e990af25f 100644
--- a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
+++ b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
@@ -613,6 +613,902 @@ define void @test_fmuladd(ptr %p, ptr %q, ptr %r) {
ret void
}
+; Half-precision intrinsics
+
+define half @add_f16(half %x, half %y) #0 {
+; CHECK-LABEL: add_f16:
+; CHECK: vadd.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sub_f16(half %x, half %y) #0 {
+; CHECK-LABEL: sub_f16:
+; CHECK: vsub.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @mul_f16(half %x, half %y) #0 {
+; CHECK-LABEL: mul_f16:
+; CHECK: vmul.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fmul.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @div_f16(half %x, half %y) #0 {
+; CHECK-LABEL: div_f16:
+; CHECK: vdiv.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fdiv.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @frem_f16(half %x, half %y) #0 {
+; CHECK-LABEL: frem_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s1, s1
+; CHECK-NEXT: bl fmodf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.frem.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @fma_f16(half %x, half %y, half %z) #0 {
+; CHECK-LABEL: fma_f16:
+; CHECK: vfma.f16 s2, s0, s1
+; CHECK-NEXT: vmov.f32 s0, s2
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @fmuladd_f16(half %x, half %y, half %z) #0 {
+; CHECK-LABEL: fmuladd_f16:
+; CHECK: vfma.f16 s2, s0, s1
+; CHECK-NEXT: vmov.f32 s0, s2
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fmuladd.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define i32 @fptosi_i32_f16(half %x) #0 {
+; CHECK-LABEL: fptosi_i32_f16:
+; CHECK: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %val = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i32 %val
+}
+
+define i32 @fptoui_i32_f16(half %x) #0 {
+; CHECK-LABEL: fptoui_i32_f16:
+; CHECK: vcvt.s32.f16 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %val = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i32 %val
+}
+
+define i64 @fptosi_i64_f16(half %x) #0 {
+; CHECK-LABEL: fptosi_i64_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vmov.f16 r0, s0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: bl __fixhfdi
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i64 %val
+}
+
+define i64 @fptoui_i64_f16(half %x) #0 {
+; CHECK-LABEL: fptoui_i64_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vmov.f16 r0, s0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: bl __fixunshfdi
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i64 %val
+}
+
+define half @sitofp_f16_i32(i32 %x) #0 {
+; CHECK-LABEL: sitofp_f16_i32:
+; CHECK: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r1, #0
+; CHECK-NEXT: eor r0, r0, #-2147483648
+; CHECK-NEXT: movt r1, #17200
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: str r1, [sp, #4]
+; CHECK-NEXT: vldr d16, .LCPI53_0
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vsub.f64 d16, d17, d16
+; CHECK-NEXT: vcvtb.f16.f64 s0, d16
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: .LCPI53_0:
+; CHECK-NEXT: .long 2147483648
+; CHECK-NEXT: .long 1127219200
+ %val = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @uitofp_f16_i32(i32 %x) #0 {
+; CHECK-LABEL: uitofp_f16_i32:
+; CHECK: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r1, #0
+; CHECK-NEXT: str r0, [sp]
+; CHECK-NEXT: movt r1, #17200
+; CHECK-NEXT: vldr d16, .LCPI54_0
+; CHECK-NEXT: str r1, [sp, #4]
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vsub.f64 d16, d17, d16
+; CHECK-NEXT: vcvtb.f16.f64 s0, d16
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 3
+; CHECK-NEXT: .LCPI54_0:
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 1127219200
+ %val = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sitofp_f16_i64(i64 %x) #0 {
+; CHECK-LABEL: sitofp_f16_i64:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __floatdihf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @uitofp_f16_i64(i64 %x) #0 {
+; CHECK-LABEL: uitofp_f16_i64:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __floatundihf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sitofp_f16_i128(i128 %x) #0 {
+; CHECK-LABEL: sitofp_f16_i128:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __floattihf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @uitofp_f16_i128(i128 %x) #0 {
+; CHECK-LABEL: uitofp_f16_i128:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __floatuntihf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sqrt_f16(half %x) #0 {
+; CHECK-LABEL: sqrt_f16:
+; CHECK: vsqrt.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.sqrt.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @powi_f16(half %x, i32 %y) #0 {
+; CHECK-LABEL: powi_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl __powisf2
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.powi.f16(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sin_f16(half %x) #0 {
+; CHECK-LABEL: sin_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl sinf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.sin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @cos_f16(half %x) #0 {
+; CHECK-LABEL: cos_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl cosf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.cos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @tan_f16(half %x) #0 {
+; CHECK-LABEL: tan_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl tanf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.tan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @asin_f16(half %x) #0 {
+; CHECK-LABEL: asin_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl asinf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.asin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @acos_f16(half %x) #0 {
+; CHECK-LABEL: acos_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl acosf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.acos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @atan_f16(half %x) #0 {
+; CHECK-LABEL: atan_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl atanf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.atan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @atan2_f16(half %x, half %y) #0 {
+; CHECK-LABEL: atan2_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s1, s1
+; CHECK-NEXT: bl atan2f
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.atan2.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @sinh_f16(half %x) #0 {
+; CHECK-LABEL: sinh_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl sinhf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.sinh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @cosh_f16(half %x) #0 {
+; CHECK-LABEL: cosh_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl coshf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.cosh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @tanh_f16(half %x) #0 {
+; CHECK-LABEL: tanh_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl tanhf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.tanh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @pow_f16(half %x, half %y) #0 {
+; CHECK-LABEL: pow_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s1, s1
+; CHECK-NEXT: bl powf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.pow.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @log_f16(half %x) #0 {
+; CHECK-LABEL: log_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl logf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.log.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @log10_f16(half %x) #0 {
+; CHECK-LABEL: log10_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl log10f
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.log10.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @log2_f16(half %x) #0 {
+; CHECK-LABEL: log2_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl log2f
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.log2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @exp_f16(half %x) #0 {
+; CHECK-LABEL: exp_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl expf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.exp.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @exp2_f16(half %x) #0 {
+; CHECK-LABEL: exp2_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl exp2f
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.exp2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @rint_f16(half %x) #0 {
+; CHECK-LABEL: rint_f16:
+; CHECK: vrintx.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.rint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @nearbyint_f16(half %x) #0 {
+; CHECK-LABEL: nearbyint_f16:
+; CHECK: vrintr.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.nearbyint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define i32 @lrint_f16(half %x) #0 {
+; CHECK-LABEL: lrint_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl lrintf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret i32 %val
+}
+
+define i64 @llrint_f16(half %x) #0 {
+; CHECK-LABEL: llrint_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl llrintf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret i64 %val
+}
+
+define half @maxnum_f16(half %x, half %y) #0 {
+; CHECK-LABEL: maxnum_f16:
+; CHECK: vmaxnm.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.maxnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @minnum_f16(half %x, half %y) #0 {
+; CHECK-LABEL: minnum_f16:
+; CHECK: vminnm.f16 s0, s0, s1
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.minnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @ceil_f16(half %x) #0 {
+; CHECK-LABEL: ceil_f16:
+; CHECK: vrintp.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.ceil.f16(half %x, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @floor_f16(half %x) #0 {
+; CHECK-LABEL: floor_f16:
+; CHECK: vrintm.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.floor.f16(half %x, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define i32 @lround_f16(half %x) #0 {
+; CHECK-LABEL: lround_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl lroundf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i32 %val
+}
+
+define i64 @llround_f16(half %x) #0 {
+; CHECK-LABEL: llround_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl llroundf
+; CHECK-NEXT: pop {r11, pc}
+ %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict") #0
+ ret i64 %val
+}
+
+define half @round_f16(half %x) #0 {
+; CHECK-LABEL: round_f16:
+; CHECK: vrinta.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.round.f16(half %x, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @roundeven_f16(half %x) #0 {
+; CHECK-LABEL: roundeven_f16:
+; CHECK: vrintn.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.roundeven.f16(half %x, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @trunc_f16(half %x) #0 {
+; CHECK-LABEL: trunc_f16:
+; CHECK: vrintz.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.trunc.f16(half %x, metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define half @ldexp_f16(half %x, i32 %y) #0 {
+; CHECK-LABEL: ldexp_f16:
+; CHECK: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bl ldexpf
+; CHECK-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: pop {r11, pc}
+ %val = call half @llvm.experimental.constrained.ldexp.f16.i32(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define i32 @fcmp_olt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_olt_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwmi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ole_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ole_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwls r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ogt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_oge_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_oge_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwge r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_oeq_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_one_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_one_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwmi r0, #1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ult_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ult_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwlt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ule_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ule_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwle r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ugt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_uge_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_uge_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwpl r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_ueq_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: movwvs r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmp_une_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmp_une_f16:
+; CHECK: vcmp.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_olt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_olt_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwmi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ole_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ole_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwls r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ogt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_oge_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_oge_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwge r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_oeq_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_one_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_one_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwmi r0, #1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ult_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ult_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwlt r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ule_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ule_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwle r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ugt_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_uge_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_uge_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwpl r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_ueq_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movweq r0, #1
+; CHECK-NEXT: movwvs r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+define i32 @fcmps_une_f16(half %a, half %b) #0 {
+; CHECK-LABEL: fcmps_une_f16:
+; CHECK: vcmpe.f16 s0, s1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+ %cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+}
+
+
+; Intrinsics to convert between floating-point types
+
+define half @fptrunc_f16_f32(float %x) #0 {
+; CHECK-LABEL: fptrunc_f16_f32:
+; CHECK: vcvtb.f16.f32 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret half %val
+}
+
+define float @fpext_f32_f16(half %x) #0 {
+; CHECK-LABEL: fpext_f32_f16:
+; CHECK: vcvtb.f32.f16 s0, s0
+; CHECK-NEXT: bx lr
+ %val = call float @llvm.experimental.constrained.fpext.f32.f16(half %x, metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
+
+attributes #0 = { strictfp }
+
+declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.frem.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.fmuladd.f16(half, half, half, metadata, metadata)
+declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata)
+declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata)
+declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata)
+declare half @llvm.experimental.constrained.sitofp.f16.i128(i128, metadata, metadata)
+declare half @llvm.experimental.constrained.uitofp.f16.i128(i128, metadata, metadata)
+declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.powi.f16(half, i32, metadata, metadata)
+declare half @llvm.experimental.constrained.sin.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.cos.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.tan.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.pow.f16(half, half, metadata, metadata)
+declare half @llvm.experimental.constrained.log.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.log10.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.log2.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.exp.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.exp2.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f16(half, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata)
+declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata)
+declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata)
+declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
+declare half @llvm.experimental.constrained.floor.f16(half, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
+declare half @llvm.experimental.constrained.round.f16(half, metadata)
+declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
+declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
+
+declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)
+
+
declare half @llvm.sqrt.f16(half %a)
declare half @llvm.powi.f16.i32(half %a, i32 %b)
declare half @llvm.sin.f16(half %a)