[llvm] [SelectionDAG][ARM] Add lowering for strict fp16 nodes on systems without fullfp16 (PR #173666)
Erik Enikeev via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 26 06:30:40 PST 2025
https://github.com/Varnike created https://github.com/llvm/llvm-project/pull/173666
As shown in #137101, on systems without fullfp16, strict fp16 nodes are currently not lowered. This patch fixes the issue by adding promotion of the corresponding strict nodes in `SoftPromoteHalfResult` (and of `STRICT_FSETCC`/`STRICT_FSETCCS` in `SoftPromoteHalfOperand`), together with the matching operation-action updates in the ARM backend.
From 4cfa36e114b4e74e4e46a0fe3b1a7485679b4e9b Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Fri, 19 Dec 2025 21:23:31 +0300
Subject: [PATCH] [SelectionDAG][ARM] Add lowering for strict fp16 nodes on
systems without fullfp16
---
.../SelectionDAG/LegalizeFloatTypes.cpp | 207 +-
llvm/lib/Target/ARM/ARMISelLowering.cpp | 60 +-
llvm/test/CodeGen/ARM/fp16-fullfp16.ll | 1713 ++++++++++++++++-
3 files changed, 1862 insertions(+), 118 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 67844c684df9f..f2c6b95a277f9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -3388,7 +3388,31 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::FTRUNC:
case ISD::FTAN:
case ISD::FTANH:
- case ISD::FCANONICALIZE: R = SoftPromoteHalfRes_UnaryOp(N); break;
+ case ISD::FCANONICALIZE:
+ case ISD::STRICT_FSQRT:
+ case ISD::STRICT_FSIN:
+ case ISD::STRICT_FCOS:
+ case ISD::STRICT_FTAN:
+ case ISD::STRICT_FASIN:
+ case ISD::STRICT_FACOS:
+ case ISD::STRICT_FATAN:
+ case ISD::STRICT_FSINH:
+ case ISD::STRICT_FCOSH:
+ case ISD::STRICT_FTANH:
+ case ISD::STRICT_FEXP:
+ case ISD::STRICT_FEXP2:
+ case ISD::STRICT_FLOG:
+ case ISD::STRICT_FLOG2:
+ case ISD::STRICT_FLOG10:
+ case ISD::STRICT_FRINT:
+ case ISD::STRICT_FNEARBYINT:
+ case ISD::STRICT_FCEIL:
+ case ISD::STRICT_FFLOOR:
+ case ISD::STRICT_FROUND:
+ case ISD::STRICT_FROUNDEVEN:
+ case ISD::STRICT_FTRUNC:
+ R = SoftPromoteHalfRes_UnaryOp(N);
+ break;
case ISD::FABS:
R = SoftPromoteHalfRes_FABS(N);
break;
@@ -3412,13 +3436,28 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::FPOW:
case ISD::FATAN2:
case ISD::FREM:
- case ISD::FSUB: R = SoftPromoteHalfRes_BinOp(N); break;
+ case ISD::FSUB:
+ case ISD::STRICT_FADD:
+ case ISD::STRICT_FDIV:
+ case ISD::STRICT_FMAXIMUM:
+ case ISD::STRICT_FMINIMUM:
+ case ISD::STRICT_FMAXNUM:
+ case ISD::STRICT_FMINNUM:
+ case ISD::STRICT_FMUL:
+ case ISD::STRICT_FPOW:
+ case ISD::STRICT_FATAN2:
+ case ISD::STRICT_FSUB:
+ case ISD::STRICT_FREM: R = SoftPromoteHalfRes_BinOp(N); break;
case ISD::FMA: // FMA is same as FMAD
- case ISD::FMAD: R = SoftPromoteHalfRes_FMAD(N); break;
+ case ISD::FMAD:
+ case ISD::STRICT_FMA: R = SoftPromoteHalfRes_FMAD(N); break;
case ISD::FPOWI:
- case ISD::FLDEXP: R = SoftPromoteHalfRes_ExpOp(N); break;
+ case ISD::FLDEXP:
+ case ISD::STRICT_FPOWI:
+ case ISD::STRICT_FLDEXP:
+ R = SoftPromoteHalfRes_ExpOp(N); break;
case ISD::FFREXP: R = SoftPromoteHalfRes_FFREXP(N); break;
@@ -3534,12 +3573,68 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FCOPYSIGN(SDNode *N) {
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FMAD(SDNode *N) {
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
- SDValue Op0 = GetSoftPromotedHalf(N->getOperand(0));
- SDValue Op1 = GetSoftPromotedHalf(N->getOperand(1));
- SDValue Op2 = GetSoftPromotedHalf(N->getOperand(2));
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ SDValue Op0 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 1 : 0));
+ SDValue Op1 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 2 : 1));
+ SDValue Op2 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 3 : 2));
SDNodeFlags Flags = N->getFlags();
SDLoc dl(N);
+ if (IsStrict) {
+ // Promote to the larger FP type.
+ auto PromotionOpcode = GetPromotionOpcodeStrict(OVT, NVT);
+ Op0 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Chain, Op0);
+ Op1 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Op0.getValue(1), Op1);
+ Op2 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Op1.getValue(1), Op2);
+
+ SDValue Res;
+ if (OVT == MVT::f16) {
+ SDValue A64 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ Op2.getValue(1), Op0);
+ SDValue B64 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ A64.getValue(1), Op1);
+ SDValue C64 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ B64.getValue(1), Op2);
+
+ // Prefer a wide FMA node if available; otherwise expand to mul+add.
+ SDValue WideRes;
+ if (TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), MVT::f64)) {
+ WideRes = DAG.getNode(ISD::STRICT_FMA, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ C64.getValue(1), A64, B64, C64);
+ } else {
+ SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ C64.getValue(1), A64, B64);
+ WideRes = DAG.getNode(ISD::STRICT_FADD, dl,
+ DAG.getVTList(MVT::f64, MVT::Other),
+ Mul.getValue(1), Mul, C64);
+ }
+
+ Res = DAG.getNode(GetPromotionOpcodeStrict(MVT::f64, OVT), dl,
+ DAG.getVTList(MVT::i16, MVT::Other),
+ WideRes.getValue(1), WideRes);
+ } else {
+ Res = DAG.getNode(N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other),
+ Op2.getValue(1), Op0, Op1, Op2);
+
+ // Convert back to FP16 as an integer.
+ Res = DAG.getNode(GetPromotionOpcodeStrict(NVT, OVT), dl,
+ DAG.getVTList(MVT::i16, MVT::Other), Res.getValue(1),
+ Res);
+ }
+
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+ }
+
// Promote to the larger FP type.
auto PromotionOpcode = GetPromotionOpcode(OVT, NVT);
Op0 = DAG.getNode(PromotionOpcode, dl, NVT, Op0);
@@ -3574,10 +3669,30 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FMAD(SDNode *N) {
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_ExpOp(SDNode *N) {
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
- SDValue Op0 = GetSoftPromotedHalf(N->getOperand(0));
- SDValue Op1 = N->getOperand(1);
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ SDValue Op0 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 1 : 0));
+ SDValue Op1 = N->getOperand(IsStrict ? 2 : 1);
SDLoc dl(N);
+ if (IsStrict) {
+ // Promote to the larger FP type.
+ Op0 = DAG.getNode(GetPromotionOpcodeStrict(OVT, NVT), dl,
+ DAG.getVTList(NVT, MVT::Other), Chain, Op0);
+
+ SDValue Res =
+ DAG.getNode(N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other),
+ Op0.getValue(1), Op0, Op1);
+
+ // Convert back to FP16 as an integer.
+ Res =
+ DAG.getNode(GetPromotionOpcodeStrict(NVT, OVT), dl,
+ DAG.getVTList(MVT::i16, MVT::Other), Res.getValue(1), Res);
+
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+ }
+
// Promote to the larger FP type.
Op0 = DAG.getNode(GetPromotionOpcode(OVT, NVT), dl, NVT, Op0);
@@ -3730,11 +3845,29 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_UNDEF(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_UnaryOp(SDNode *N) {
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
- SDValue Op = GetSoftPromotedHalf(N->getOperand(0));
+ SDValue Op = GetSoftPromotedHalf(N->getOperand(IsStrict ? 1 : 0));
SDLoc dl(N);
+ if (IsStrict) {
+ // Promote to the larger FP type.
+ Op = DAG.getNode(GetPromotionOpcodeStrict(OVT, NVT), dl,
+ DAG.getVTList(NVT, MVT::Other), Chain, Op);
+ SDValue Res = DAG.getNode(
+ N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other), Op.getValue(1), Op);
+
+ // Convert back to FP16 as an integer.
+ Res =
+ DAG.getNode(GetPromotionOpcodeStrict(NVT, OVT), dl,
+ DAG.getVTList(MVT::i16, MVT::Other), Res.getValue(1), Res);
+
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+ }
+
// Promote to the larger FP type.
Op = DAG.getNode(GetPromotionOpcode(OVT, NVT), dl, NVT, Op);
@@ -3767,12 +3900,34 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_AssertNoFPClass(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_BinOp(SDNode *N) {
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
EVT OVT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
- SDValue Op0 = GetSoftPromotedHalf(N->getOperand(0));
- SDValue Op1 = GetSoftPromotedHalf(N->getOperand(1));
+ SDValue Op0 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 1 : 0));
+ SDValue Op1 = GetSoftPromotedHalf(N->getOperand(IsStrict ? 2 : 1));
SDLoc dl(N);
+ if (IsStrict) {
+ auto PromotionOpcode = GetPromotionOpcodeStrict(OVT, NVT);
+ Op0 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Chain, Op0);
+ Op1 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Op0.getValue(1), Op1);
+
+ SDValue Res =
+ DAG.getNode(N->getOpcode(), dl, DAG.getVTList(NVT, MVT::Other),
+ Op1.getValue(1), Op0, Op1);
+
+ // Convert back to FP16 as an integer.
+ Res =
+ DAG.getNode(GetPromotionOpcodeStrict(NVT, OVT), dl,
+ DAG.getVTList(MVT::i16, MVT::Other), Res.getValue(1), Res);
+
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return Res;
+ }
+
// Promote to the larger FP type.
auto PromotionOpcode = GetPromotionOpcode(OVT, NVT);
Op0 = DAG.getNode(PromotionOpcode, dl, NVT, Op0);
@@ -3845,6 +4000,8 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
case ISD::STRICT_FP_EXTEND:
case ISD::FP_EXTEND: Res = SoftPromoteHalfOp_FP_EXTEND(N); break;
case ISD::SELECT_CC: Res = SoftPromoteHalfOp_SELECT_CC(N, OpNo); break;
+ case ISD::STRICT_FSETCC:
+ case ISD::STRICT_FSETCCS:
case ISD::SETCC: Res = SoftPromoteHalfOp_SETCC(N); break;
case ISD::STORE: Res = SoftPromoteHalfOp_STORE(N, OpNo); break;
case ISD::ATOMIC_STORE:
@@ -3980,9 +4137,12 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_SELECT_CC(SDNode *N,
}
SDValue DAGTypeLegalizer::SoftPromoteHalfOp_SETCC(SDNode *N) {
- SDValue Op0 = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ bool IsStrict = N->isStrictFPOpcode();
+ SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
+ SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
+ SDValue Op1 = N->getOperand(IsStrict ? 2 : 1);
+ ISD::CondCode CCCode =
+ cast<CondCodeSDNode>(N->getOperand(IsStrict ? 3 : 2))->get();
SDLoc dl(N);
EVT SVT = Op0.getValueType();
@@ -3991,6 +4151,23 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_SETCC(SDNode *N) {
Op0 = GetSoftPromotedHalf(Op0);
Op1 = GetSoftPromotedHalf(Op1);
+ if (IsStrict) {
+ // Promote to the larger FP type.
+ auto PromotionOpcode = GetPromotionOpcodeStrict(SVT, NVT);
+ Op0 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Chain, Op0);
+ Op1 = DAG.getNode(PromotionOpcode, dl, DAG.getVTList(NVT, MVT::Other),
+ Op0.getValue(1), Op1);
+
+ SDValue Res =
+ DAG.getSetCC(SDLoc(N), N->getValueType(0), Op0, Op1, CCCode,
+ Op1.getValue(1), N->getOpcode() == ISD::STRICT_FSETCCS);
+
+ ReplaceValueWith(SDValue(N, 0), Res);
+ ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
+ return SDValue();
+ }
+
// Promote to the larger FP type.
auto PromotionOpcode = GetPromotionOpcode(SVT, NVT);
Op0 = DAG.getNode(PromotionOpcode, dl, NVT, Op0);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9e949fbdaac2b..4de8119902aea 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1250,8 +1250,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
}
// Strict floating-point comparisons need custom lowering.
- setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
- setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
@@ -1286,33 +1284,33 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
// FP16 often need to be promoted to call lib functions
// clang-format off
- if (Subtarget->hasFullFP16()) {
- setOperationAction(ISD::LRINT, MVT::f16, Expand);
- setOperationAction(ISD::LROUND, MVT::f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
-
- for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
- ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
- ISD::FSINCOSPI, ISD::FMODF, ISD::FACOS,
- ISD::FASIN, ISD::FATAN, ISD::FATAN2,
- ISD::FCOSH, ISD::FSINH, ISD::FTANH,
- ISD::FTAN, ISD::FEXP, ISD::FEXP2,
- ISD::FEXP10, ISD::FLOG, ISD::FLOG2,
- ISD::FLOG10, ISD::STRICT_FREM, ISD::STRICT_FPOW,
- ISD::STRICT_FPOWI, ISD::STRICT_FCOS, ISD::STRICT_FSIN,
- ISD::STRICT_FACOS, ISD::STRICT_FASIN, ISD::STRICT_FATAN,
- ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, ISD::STRICT_FSINH,
- ISD::STRICT_FTANH, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
- ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10,
- ISD::STRICT_FTAN}) {
- setOperationAction(Op, MVT::f16, Promote);
- }
-
- // Round-to-integer need custom lowering for fp16, as Promote doesn't work
- // because the result type is integer.
- for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT})
- setOperationAction(Op, MVT::f16, Custom);
+ setOperationAction(ISD::LRINT, MVT::f16, Expand);
+ setOperationAction(ISD::LROUND, MVT::f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+
+ for (auto Op : {ISD::FREM, ISD::FPOW, ISD::FPOWI,
+ ISD::FCOS, ISD::FSIN, ISD::FSINCOS,
+ ISD::FSINCOSPI, ISD::FMODF, ISD::FACOS,
+ ISD::FASIN, ISD::FATAN, ISD::FATAN2,
+ ISD::FCOSH, ISD::FSINH, ISD::FTANH,
+ ISD::FTAN, ISD::FEXP, ISD::FEXP2,
+ ISD::FEXP10, ISD::FLOG, ISD::FLOG2,
+ ISD::FLOG10, ISD::STRICT_FREM, ISD::STRICT_FPOW,
+ ISD::STRICT_FPOWI, ISD::STRICT_FCOS, ISD::STRICT_FSIN,
+ ISD::STRICT_FACOS, ISD::STRICT_FASIN, ISD::STRICT_FATAN,
+ ISD::STRICT_FATAN2, ISD::STRICT_FCOSH, ISD::STRICT_FSINH,
+ ISD::STRICT_FTANH, ISD::STRICT_FEXP, ISD::STRICT_FEXP2,
+ ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10,
+ ISD::STRICT_FTAN}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ }
+
+ // Round-to-integer need custom lowering for fp16, as Promote doesn't work
+ // because the result type is integer.
+ for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT})
+ setOperationAction(Op, MVT::f16, Custom);
+ if (Subtarget->hasFullFP16()) {
for (auto Op : {ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
ISD::FNEARBYINT, ISD::FRINT, ISD::FFLOOR,
ISD::FCEIL, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN,
@@ -1321,6 +1319,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
setOperationAction(Op, MVT::f16, Legal);
}
// clang-format on
+
+ setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
+ setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
+ } else {
+ setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
+ setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
}
if (Subtarget->hasNEON()) {
diff --git a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
index 7b9474313e5bf..2099287154ab1 100644
--- a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
+++ b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
@@ -1,5 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple armv8a-none-none-eabihf -mattr=fullfp16 -asm-verbose=false < %s | FileCheck %s
+; RUN: llc -mtriple thumbv8.1m.main-none-eabihf -mcpu=cortex-m33 %s -o - | FileCheck %s --check-prefix=CHECK-NOFULLFP16
define void @test_fadd(ptr %p, ptr %q) {
; CHECK-LABEL: test_fadd:
@@ -8,6 +9,20 @@ define void @test_fadd(ptr %p, ptr %q) {
; CHECK-NEXT: vadd.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fadd:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fadd half %a, %b
@@ -22,6 +37,20 @@ define void @test_fsub(ptr %p, ptr %q) {
; CHECK-NEXT: vsub.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fsub:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vsub.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fsub half %a, %b
@@ -36,6 +65,20 @@ define void @test_fmul(ptr %p, ptr %q) {
; CHECK-NEXT: vmul.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fmul:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmul.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fmul half %a, %b
@@ -50,6 +93,20 @@ define void @test_fdiv(ptr %p, ptr %q) {
; CHECK-NEXT: vdiv.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fdiv:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vdiv.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fdiv half %a, %b
@@ -70,6 +127,23 @@ define arm_aapcs_vfpcc void @test_frem(ptr %p, ptr %q) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_frem:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s2, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-NOFULLFP16-NEXT: bl fmodf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = frem half %a, %b
@@ -82,6 +156,12 @@ define void @test_load_store(ptr %p, ptr %q) {
; CHECK: vldr.16 s0, [r0]
; CHECK-NEXT: vstr.16 s0, [r1]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_load_store:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: strh r0, [r1]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
store half %a, ptr %q
ret void
@@ -93,6 +173,15 @@ define i32 @test_fptosi_i32(ptr %p) {
; CHECK-NEXT: vcvt.s32.f16 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fptosi_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = fptosi half %a to i32
ret i32 %r
@@ -106,6 +195,17 @@ define i64 @test_fptosi_i64(ptr %p) {
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: bl __fixhfdi
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_fptosi_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2lz
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%a = load half, ptr %p, align 2
%r = fptosi half %a to i64
ret i64 %r
@@ -117,6 +217,15 @@ define i32 @test_fptoui_i32(ptr %p) {
; CHECK-NEXT: vcvt.u32.f16 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fptoui_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = fptoui half %a to i32
ret i32 %r
@@ -130,6 +239,17 @@ define i64 @test_fptoui_i64(ptr %p) {
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: bl __fixunshfdi
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_fptoui_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2ulz
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%a = load half, ptr %p, align 2
%r = fptoui half %a to i64
ret i64 %r
@@ -141,6 +261,15 @@ define void @test_sitofp_i32(i32 %a, ptr %p) {
; CHECK-NEXT: vcvt.f16.s32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r1]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_sitofp_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r1]
+; CHECK-NOFULLFP16-NEXT: bx lr
%r = sitofp i32 %a to half
store half %r, ptr %p
ret void
@@ -152,6 +281,15 @@ define void @test_uitofp_i32(i32 %a, ptr %p) {
; CHECK-NEXT: vcvt.f16.u32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r1]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_uitofp_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvt.f32.u32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r1]
+; CHECK-NOFULLFP16-NEXT: bx lr
%r = uitofp i32 %a to half
store half %r, ptr %p
ret void
@@ -165,6 +303,18 @@ define void @test_sitofp_i64(i64 %a, ptr %p) {
; CHECK-NEXT: bl __floatdihf
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_sitofp_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r2
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_l2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%r = sitofp i64 %a to half
store half %r, ptr %p
ret void
@@ -178,6 +328,18 @@ define void @test_uitofp_i64(i64 %a, ptr %p) {
; CHECK-NEXT: bl __floatundihf
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_uitofp_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r2
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_ul2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%r = uitofp i64 %a to half
store half %r, ptr %p
ret void
@@ -188,6 +350,13 @@ define void @test_fptrunc_float(float %f, ptr %p) {
; CHECK: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fptrunc_float:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = fptrunc float %f to half
store half %a, ptr %p
ret void
@@ -198,6 +367,16 @@ define void @test_fptrunc_double(double %d, ptr %p) {
; CHECK: vcvtb.f16.f64 s0, d0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fptrunc_double:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, r1, d0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_d2h
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = fptrunc double %d to half
store half %a, ptr %p
ret void
@@ -208,6 +387,13 @@ define float @test_fpextend_float(ptr %p) {
; CHECK: vldr.16 s0, [r0]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fpextend_float:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = fpext half %a to float
ret float %r
@@ -218,6 +404,18 @@ define double @test_fpextend_double(ptr %p) {
; CHECK: vldr.16 s0, [r0]
; CHECK-NEXT: vcvtb.f64.f16 d0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fpextend_double:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: vmov d0, r0, r1
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%a = load half, ptr %p, align 2
%r = fpext half %a to double
ret double %r
@@ -227,6 +425,11 @@ define i16 @test_bitcast_halftoi16(ptr %p) {
; CHECK-LABEL: test_bitcast_halftoi16:
; CHECK: ldrh r0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_bitcast_halftoi16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = bitcast half %a to i16
ret i16 %r
@@ -236,6 +439,11 @@ define void @test_bitcast_i16tohalf(i16 %a, ptr %p) {
; CHECK-LABEL: test_bitcast_i16tohalf:
; CHECK: strh r0, [r1]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_bitcast_i16tohalf:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: strh r0, [r1]
+; CHECK-NOFULLFP16-NEXT: bx lr
%r = bitcast i16 %a to half
store half %r, ptr %p
ret void
@@ -247,6 +455,17 @@ define void @test_sqrt(ptr %p) {
; CHECK-NEXT: vsqrt.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_sqrt:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vsqrt.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.sqrt.f16(half %a)
store half %r, ptr %p
@@ -265,6 +484,21 @@ define void @test_fpowi(ptr %p, i32 %b) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_fpowi:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r2
+; CHECK-NOFULLFP16-NEXT: mov r0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl __powisf2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.powi.f16.i32(half %a, i32 %b)
store half %r, ptr %p
@@ -282,6 +516,20 @@ define void @test_sin(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_sin:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl sinf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.sin.f16(half %a)
store half %r, ptr %p
@@ -299,6 +547,20 @@ define void @test_cos(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_cos:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl cosf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.cos.f16(half %a)
store half %r, ptr %p
@@ -316,6 +578,20 @@ define void @test_tan(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_tan:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl tanf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.tan.f16(half %a)
store half %r, ptr %p
@@ -335,6 +611,23 @@ define void @test_pow(ptr %p, ptr %q) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_pow:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s2, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-NOFULLFP16-NEXT: bl powf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.pow.f16(half %a, half %b)
@@ -353,6 +646,20 @@ define void @test_exp(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_exp:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl expf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.exp.f16(half %a)
store half %r, ptr %p
@@ -370,6 +677,20 @@ define void @test_exp2(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_exp2:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl exp2f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.exp2.f16(half %a)
store half %r, ptr %p
@@ -387,6 +708,20 @@ define void @test_log(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_log:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl logf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log.f16(half %a)
store half %r, ptr %p
@@ -404,6 +739,20 @@ define void @test_log10(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_log10:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl log10f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log10.f16(half %a)
store half %r, ptr %p
@@ -421,6 +770,20 @@ define void @test_log2(ptr %p) {
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vstr.16 s0, [r4]
; CHECK-NEXT: pop {r4, pc}
+;
+; CHECK-NOFULLFP16-LABEL: test_log2:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, lr}
+; CHECK-NOFULLFP16-NEXT: push {r4, lr}
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl log2f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: strh r0, [r4]
+; CHECK-NOFULLFP16-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log2.f16(half %a)
store half %r, ptr %p
@@ -435,6 +798,42 @@ define void @test_fma(ptr %p, ptr %q, ptr %r) {
; CHECK-NEXT: vfma.f16 s4, s2, s0
; CHECK-NEXT: vstr.16 s4, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fma:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NOFULLFP16-NEXT: push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NOFULLFP16-NEXT: mov r8, r0
+; CHECK-NOFULLFP16-NEXT: ldrh r4, [r0]
+; CHECK-NOFULLFP16-NEXT: ldrh r0, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r7, [r2]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: vmov s0, r4
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: mov r5, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: mov r6, r1
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: mov r2, r5
+; CHECK-NOFULLFP16-NEXT: mov r3, r6
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dmul
+; CHECK-NOFULLFP16-NEXT: vmov s0, r7
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: mov r5, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: mov r6, r1
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: mov r2, r0
+; CHECK-NOFULLFP16-NEXT: mov r3, r1
+; CHECK-NOFULLFP16-NEXT: mov r0, r5
+; CHECK-NOFULLFP16-NEXT: mov r1, r6
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dadd
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_d2h
+; CHECK-NOFULLFP16-NEXT: strh.w r0, [r8]
+; CHECK-NOFULLFP16-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%c = load half, ptr %r, align 2
@@ -449,6 +848,13 @@ define void @test_fabs(ptr %p) {
; CHECK-NEXT: vabs.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fabs:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bfc r1, #15, #17
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.fabs.f16(half %a)
store half %r, ptr %p
@@ -462,6 +868,20 @@ define void @test_minnum(ptr %p, ptr %q) {
; CHECK-NEXT: vminnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_minnum:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vminnm.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.minnum.f16(half %a, half %b)
@@ -476,6 +896,20 @@ define void @test_maxnum(ptr %p, ptr %q) {
; CHECK-NEXT: vmaxnm.f16 s0, s2, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_maxnum:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmaxnm.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.maxnum.f16(half %a, half %b)
@@ -492,6 +926,19 @@ define void @test_minimum(ptr %p) {
; CHECK-NEXT: vselge.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_minimum:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ge
+; CHECK-NOFULLFP16-NEXT: movge.w r1, #15360
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%c = fcmp ult half %a, 1.0
%r = select i1 %c, half %a, half 1.0
@@ -508,6 +955,19 @@ define void @test_maximum(ptr %p) {
; CHECK-NEXT: vselge.f16 s0, s0, s2
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_maximum:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ls
+; CHECK-NOFULLFP16-NEXT: movls.w r1, #15360
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%c = fcmp ugt half %a, 1.0
%r = select i1 %c, half %a, half 1.0
@@ -530,6 +990,16 @@ define void @test_copysign(ptr %p, ptr %q) {
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: add sp, sp, #4
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_copysign:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r0]
+; CHECK-NOFULLFP16-NEXT: and r1, r1, #32768
+; CHECK-NOFULLFP16-NEXT: bfc r2, #15, #17
+; CHECK-NOFULLFP16-NEXT: add r1, r2
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.copysign.f16(half %a, half %b)
@@ -543,6 +1013,17 @@ define void @test_floor(ptr %p) {
; CHECK-NEXT: vrintm.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_floor:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintm.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.floor.f16(half %a)
store half %r, ptr %p
@@ -555,6 +1036,17 @@ define void @test_ceil(ptr %p) {
; CHECK-NEXT: vrintp.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_ceil:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintp.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.ceil.f16(half %a)
store half %r, ptr %p
@@ -567,6 +1059,17 @@ define void @test_trunc(ptr %p) {
; CHECK-NEXT: vrintz.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_trunc:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintz.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.trunc.f16(half %a)
store half %r, ptr %p
@@ -579,6 +1082,17 @@ define void @test_rint(ptr %p) {
; CHECK-NEXT: vrintx.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_rint:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintx.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.rint.f16(half %a)
store half %r, ptr %p
@@ -591,6 +1105,17 @@ define void @test_nearbyint(ptr %p) {
; CHECK-NEXT: vrintr.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_nearbyint:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintr.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.nearbyint.f16(half %a)
store half %r, ptr %p
@@ -603,6 +1128,17 @@ define void @test_round(ptr %p) {
; CHECK-NEXT: vrinta.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_round:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrinta.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.round.f16(half %a)
store half %r, ptr %p
@@ -615,6 +1151,17 @@ define void @test_roundeven(ptr %p) {
; CHECK-NEXT: vrintn.f16 s0, s0
; CHECK-NEXT: vstr.16 s0, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_roundeven:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintn.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.roundeven.f16(half %a)
store half %r, ptr %p
@@ -629,6 +1176,26 @@ define void @test_fmuladd(ptr %p, ptr %q, ptr %r) {
; CHECK-NEXT: vfma.f16 s4, s2, s0
; CHECK-NEXT: vstr.16 s4, [r0]
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: test_fmuladd:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: ldrh r3, [r0]
+; CHECK-NOFULLFP16-NEXT: ldrh r1, [r1]
+; CHECK-NOFULLFP16-NEXT: vmov s2, r3
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: ldrh r2, [r2]
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmul.f32 s0, s2, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov s2, r2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: strh r1, [r0]
+; CHECK-NOFULLFP16-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%c = load half, ptr %r, align 2
@@ -643,6 +1210,20 @@ define half @add_f16(half %x, half %y) #0 {
; CHECK-LABEL: add_f16:
; CHECK: vadd.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: add_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -651,6 +1232,20 @@ define half @sub_f16(half %x, half %y) #0 {
; CHECK-LABEL: sub_f16:
; CHECK: vsub.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: sub_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vsub.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fsub.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -659,6 +1254,20 @@ define half @mul_f16(half %x, half %y) #0 {
; CHECK-LABEL: mul_f16:
; CHECK: vmul.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: mul_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmul.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fmul.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -667,6 +1276,20 @@ define half @div_f16(half %x, half %y) #0 {
; CHECK-LABEL: div_f16:
; CHECK: vdiv.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: div_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vdiv.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fdiv.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -680,6 +1303,22 @@ define half @frem_f16(half %x, half %y) #0 {
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: frem_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-NOFULLFP16-NEXT: bl fmodf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.frem.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -689,6 +1328,52 @@ define half @fma_f16(half %x, half %y, half %z) #0 {
; CHECK: vfma.f16 s2, s0, s1
; CHECK-NEXT: vmov.f32 s0, s2
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fma_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NOFULLFP16-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NOFULLFP16-NEXT: .pad #4
+; CHECK-NOFULLFP16-NEXT: sub sp, #4
+; CHECK-NOFULLFP16-NEXT: .vsave {d8, d9}
+; CHECK-NOFULLFP16-NEXT: vpush {d8, d9}
+; CHECK-NOFULLFP16-NEXT: vmov r2, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s1
+; CHECK-NOFULLFP16-NEXT: uxth r2, r2
+; CHECK-NOFULLFP16-NEXT: vmov r0, s2
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r2
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov s2, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s16, s2
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s18, s2
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: mov r4, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s16
+; CHECK-NOFULLFP16-NEXT: mov r5, r1
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: mov r6, r0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s18
+; CHECK-NOFULLFP16-NEXT: mov r7, r1
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2d
+; CHECK-NOFULLFP16-NEXT: mov r8, r0
+; CHECK-NOFULLFP16-NEXT: mov r9, r1
+; CHECK-NOFULLFP16-NEXT: mov r0, r4
+; CHECK-NOFULLFP16-NEXT: mov r1, r5
+; CHECK-NOFULLFP16-NEXT: mov r2, r6
+; CHECK-NOFULLFP16-NEXT: mov r3, r7
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dmul
+; CHECK-NOFULLFP16-NEXT: mov r2, r8
+; CHECK-NOFULLFP16-NEXT: mov r3, r9
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dadd
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_d2h
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vpop {d8, d9}
+; CHECK-NOFULLFP16-NEXT: add sp, #4
+; CHECK-NOFULLFP16-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc}
%val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -698,6 +1383,30 @@ define half @fmuladd_f16(half %x, half %y, half %z) #0 {
; CHECK: vfma.f16 s2, s0, s1
; CHECK-NEXT: vmov.f32 s0, s2
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fmuladd_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r1, s1
+; CHECK-NOFULLFP16-NEXT: vmov r2, s0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: uxth r2, r2
+; CHECK-NOFULLFP16-NEXT: vmov r0, s2
+; CHECK-NOFULLFP16-NEXT: vmov s0, r2
+; CHECK-NOFULLFP16-NEXT: vmov s2, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmul.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fmuladd.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -707,6 +1416,16 @@ define i32 @fptosi_i32_f16(half %x) #0 {
; CHECK: vcvt.s32.f16 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fptosi_i32_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -716,6 +1435,16 @@ define i32 @fptoui_i32_f16(half %x) #0 {
; CHECK: vcvt.u32.f16 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fptoui_i32_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -728,6 +1457,18 @@ define i64 @fptosi_i64_f16(half %x) #0 {
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: bl __fixhfdi
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: fptosi_i64_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2lz
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -740,6 +1481,18 @@ define i64 @fptoui_i64_f16(half %x) #0 {
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: bl __fixunshfdi
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: fptoui_i64_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_f2ulz
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -763,6 +1516,29 @@ define half @sitofp_f16_i32(i32 %x) #0 {
; CHECK-NEXT: .LCPI57_0:
; CHECK-NEXT: .long 2147483648
; CHECK-NEXT: .long 1127219200
+;
+; CHECK-NOFULLFP16-LABEL: sitofp_f16_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: .pad #8
+; CHECK-NOFULLFP16-NEXT: sub sp, #8
+; CHECK-NOFULLFP16-NEXT: vldr d0, .LCPI57_0
+; CHECK-NOFULLFP16-NEXT: movs r1, #0
+; CHECK-NOFULLFP16-NEXT: eor r0, r0, #-2147483648
+; CHECK-NOFULLFP16-NEXT: vmov r2, r3, d0
+; CHECK-NOFULLFP16-NEXT: movt r1, #17200
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dsub
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_d2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: add sp, #8
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
+; CHECK-NOFULLFP16-NEXT: .p2align 3
+; CHECK-NOFULLFP16-NEXT: @ %bb.1:
+; CHECK-NOFULLFP16-NEXT: .LCPI57_0:
+; CHECK-NOFULLFP16-NEXT: .long 2147483648 @ double 4503601774854144
+; CHECK-NOFULLFP16-NEXT: .long 1127219200
%val = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -785,6 +1561,28 @@ define half @uitofp_f16_i32(i32 %x) #0 {
; CHECK-NEXT: .LCPI58_0:
; CHECK-NEXT: .long 0
; CHECK-NEXT: .long 1127219200
+;
+; CHECK-NOFULLFP16-LABEL: uitofp_f16_i32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: .pad #8
+; CHECK-NOFULLFP16-NEXT: sub sp, #8
+; CHECK-NOFULLFP16-NEXT: vldr d0, .LCPI58_0
+; CHECK-NOFULLFP16-NEXT: movs r1, #0
+; CHECK-NOFULLFP16-NEXT: vmov r2, r3, d0
+; CHECK-NOFULLFP16-NEXT: movt r1, #17200
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_dsub
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_d2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: add sp, #8
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
+; CHECK-NOFULLFP16-NEXT: .p2align 3
+; CHECK-NOFULLFP16-NEXT: @ %bb.1:
+; CHECK-NOFULLFP16-NEXT: .LCPI58_0:
+; CHECK-NOFULLFP16-NEXT: .long 0 @ double 4503599627370496
+; CHECK-NOFULLFP16-NEXT: .long 1127219200
%val = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -795,6 +1593,15 @@ define half @sitofp_f16_i64(i64 %x) #0 {
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __floatdihf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: sitofp_f16_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_l2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -805,6 +1612,15 @@ define half @uitofp_f16_i64(i64 %x) #0 {
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __floatundihf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: uitofp_f16_i64:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: bl __aeabi_ul2f
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -815,6 +1631,14 @@ define half @sitofp_f16_i128(i128 %x) #0 {
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __floattihf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: sitofp_f16_i128:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: bl __floattisf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -825,6 +1649,14 @@ define half @uitofp_f16_i128(i128 %x) #0 {
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __floatuntihf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: uitofp_f16_i128:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: bl __floatuntisf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -833,6 +1665,16 @@ define half @sqrt_f16(half %x) #0 {
; CHECK-LABEL: sqrt_f16:
; CHECK: vsqrt.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: sqrt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vsqrt.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.sqrt.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -845,6 +1687,18 @@ define half @powi_f16(half %x, i32 %y) #0 {
; CHECK-NEXT: bl __powisf2
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: powi_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl __powisf2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.powi.f16(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -857,6 +1711,18 @@ define half @sin_f16(half %x) #0 {
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: sin_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl sinf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.sin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -869,6 +1735,18 @@ define half @cos_f16(half %x) #0 {
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: cos_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl cosf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.cos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -881,6 +1759,18 @@ define half @tan_f16(half %x) #0 {
; CHECK-NEXT: bl tanf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: tan_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl tanf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.tan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -893,6 +1783,18 @@ define half @asin_f16(half %x) #0 {
; CHECK-NEXT: bl asinf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: asin_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl asinf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.asin.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -905,6 +1807,18 @@ define half @acos_f16(half %x) #0 {
; CHECK-NEXT: bl acosf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: acos_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl acosf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.acos.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -917,6 +1831,18 @@ define half @atan_f16(half %x) #0 {
; CHECK-NEXT: bl atanf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: atan_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl atanf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.atan.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -930,6 +1856,22 @@ define half @atan2_f16(half %x, half %y) #0 {
; CHECK-NEXT: bl atan2f
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: atan2_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-NOFULLFP16-NEXT: bl atan2f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.atan2.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -942,6 +1884,18 @@ define half @sinh_f16(half %x) #0 {
; CHECK-NEXT: bl sinhf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: sinh_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl sinhf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.sinh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -954,6 +1908,18 @@ define half @cosh_f16(half %x) #0 {
; CHECK-NEXT: bl coshf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: cosh_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl coshf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.cosh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -966,6 +1932,18 @@ define half @tanh_f16(half %x) #0 {
; CHECK-NEXT: bl tanhf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: tanh_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl tanhf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.tanh.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -979,6 +1957,22 @@ define half @pow_f16(half %x, half %y) #0 {
; CHECK-NEXT: bl powf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: pow_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-NOFULLFP16-NEXT: bl powf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.pow.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -991,6 +1985,18 @@ define half @log_f16(half %x) #0 {
; CHECK-NEXT: bl logf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: log_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl logf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.log.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1003,6 +2009,18 @@ define half @log10_f16(half %x) #0 {
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: log10_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl log10f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.log10.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1015,6 +2033,18 @@ define half @log2_f16(half %x) #0 {
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: log2_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl log2f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.log2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1027,6 +2057,18 @@ define half @exp_f16(half %x) #0 {
; CHECK-NEXT: bl expf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: exp_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl expf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.exp.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1039,6 +2081,18 @@ define half @exp2_f16(half %x) #0 {
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: exp2_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl exp2f
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.exp2.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1047,6 +2101,16 @@ define half @rint_f16(half %x) #0 {
; CHECK-LABEL: rint_f16:
; CHECK: vrintx.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: rint_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintx.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.rint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1055,6 +2119,16 @@ define half @nearbyint_f16(half %x) #0 {
; CHECK-LABEL: nearbyint_f16:
; CHECK: vrintr.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: nearbyint_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintr.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.nearbyint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1066,6 +2140,17 @@ define i32 @lrint_f16(half %x) #0 {
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: lrint_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl lrintf
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -1077,6 +2162,17 @@ define i64 @llrint_f16(half %x) #0 {
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: llrint_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl llrintf
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1085,6 +2181,20 @@ define half @maxnum_f16(half %x, half %y) #0 {
; CHECK-LABEL: maxnum_f16:
; CHECK: vmaxnm.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: maxnum_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vmaxnm.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.maxnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1093,6 +2203,20 @@ define half @minnum_f16(half %x, half %y) #0 {
; CHECK-LABEL: minnum_f16:
; CHECK: vminnm.f16 s0, s0, s1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: minnum_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vminnm.f32 s0, s0, s2
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.minnum.f16(half %x, half %y, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1101,6 +2225,16 @@ define half @ceil_f16(half %x) #0 {
; CHECK-LABEL: ceil_f16:
; CHECK: vrintp.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: ceil_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintp.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.ceil.f16(half %x, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1109,6 +2243,16 @@ define half @floor_f16(half %x) #0 {
; CHECK-LABEL: floor_f16:
; CHECK: vrintm.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: floor_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintm.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.floor.f16(half %x, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1120,6 +2264,17 @@ define i32 @lround_f16(half %x) #0 {
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bl lroundf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: lround_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl lroundf
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -1131,6 +2286,17 @@ define i64 @llround_f16(half %x) #0 {
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bl llroundf
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: llround_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl llroundf
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1139,6 +2305,16 @@ define half @round_f16(half %x) #0 {
; CHECK-LABEL: round_f16:
; CHECK: vrinta.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: round_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrinta.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.round.f16(half %x, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1147,6 +2323,16 @@ define half @roundeven_f16(half %x) #0 {
; CHECK-LABEL: roundeven_f16:
; CHECK: vrintn.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: roundeven_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintn.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.roundeven.f16(half %x, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1155,6 +2341,16 @@ define half @trunc_f16(half %x) #0 {
; CHECK-LABEL: trunc_f16:
; CHECK: vrintz.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: trunc_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vrintz.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.trunc.f16(half %x, metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1167,6 +2363,18 @@ define half @ldexp_f16(half %x, i32 %y) #0 {
; CHECK-NEXT: bl ldexpf
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NOFULLFP16-LABEL: ldexp_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: .save {r7, lr}
+; CHECK-NOFULLFP16-NEXT: push {r7, lr}
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bl ldexpf
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: pop {r7, pc}
%val = call half @llvm.experimental.constrained.ldexp.f16.i32(half %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1178,6 +2386,23 @@ define i32 @fcmp_olt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwmi r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_olt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it mi
+; CHECK-NOFULLFP16-NEXT: movmi r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1190,6 +2415,23 @@ define i32 @fcmp_ole_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwls r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ole_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ls
+; CHECK-NOFULLFP16-NEXT: movls r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1202,6 +2444,23 @@ define i32 @fcmp_ogt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwgt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ogt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it gt
+; CHECK-NOFULLFP16-NEXT: movgt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1214,6 +2473,23 @@ define i32 @fcmp_oge_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwge r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_oge_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ge
+; CHECK-NOFULLFP16-NEXT: movge r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1226,6 +2502,23 @@ define i32 @fcmp_oeq_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movweq r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_oeq_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it eq
+; CHECK-NOFULLFP16-NEXT: moveq r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1239,6 +2532,25 @@ define i32 @fcmp_one_f16(half %a, half %b) #0 {
; CHECK-NEXT: movwmi r0, #1
; CHECK-NEXT: movwgt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_one_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it mi
+; CHECK-NOFULLFP16-NEXT: movmi r0, #1
+; CHECK-NOFULLFP16-NEXT: it gt
+; CHECK-NOFULLFP16-NEXT: movgt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1251,6 +2563,23 @@ define i32 @fcmp_ult_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwlt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ult_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it lt
+; CHECK-NOFULLFP16-NEXT: movlt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1263,6 +2592,23 @@ define i32 @fcmp_ule_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwle r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ule_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it le
+; CHECK-NOFULLFP16-NEXT: movle r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1275,6 +2621,23 @@ define i32 @fcmp_ugt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwhi r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ugt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it hi
+; CHECK-NOFULLFP16-NEXT: movhi r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1287,6 +2650,23 @@ define i32 @fcmp_uge_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwpl r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_uge_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it pl
+; CHECK-NOFULLFP16-NEXT: movpl r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1300,6 +2680,25 @@ define i32 @fcmp_ueq_f16(half %a, half %b) #0 {
; CHECK-NEXT: movweq r0, #1
; CHECK-NEXT: movwvs r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_ueq_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it eq
+; CHECK-NOFULLFP16-NEXT: moveq r0, #1
+; CHECK-NOFULLFP16-NEXT: it vs
+; CHECK-NOFULLFP16-NEXT: movvs r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1312,6 +2711,23 @@ define i32 @fcmp_une_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwne r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmp_une_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ne
+; CHECK-NOFULLFP16-NEXT: movne r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1324,6 +2740,23 @@ define i32 @fcmps_olt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwmi r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_olt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it mi
+; CHECK-NOFULLFP16-NEXT: movmi r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1336,6 +2769,23 @@ define i32 @fcmps_ole_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwls r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ole_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ls
+; CHECK-NOFULLFP16-NEXT: movls r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1348,6 +2798,23 @@ define i32 @fcmps_ogt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwgt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ogt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it gt
+; CHECK-NOFULLFP16-NEXT: movgt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1360,6 +2827,23 @@ define i32 @fcmps_oge_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwge r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_oge_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ge
+; CHECK-NOFULLFP16-NEXT: movge r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1372,6 +2856,23 @@ define i32 @fcmps_oeq_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movweq r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_oeq_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it eq
+; CHECK-NOFULLFP16-NEXT: moveq r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1385,6 +2886,25 @@ define i32 @fcmps_one_f16(half %a, half %b) #0 {
; CHECK-NEXT: movwmi r0, #1
; CHECK-NEXT: movwgt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_one_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it mi
+; CHECK-NOFULLFP16-NEXT: movmi r0, #1
+; CHECK-NOFULLFP16-NEXT: it gt
+; CHECK-NOFULLFP16-NEXT: movgt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1397,6 +2917,23 @@ define i32 @fcmps_ult_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwlt r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ult_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it lt
+; CHECK-NOFULLFP16-NEXT: movlt r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1409,6 +2946,23 @@ define i32 @fcmps_ule_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwle r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ule_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it le
+; CHECK-NOFULLFP16-NEXT: movle r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1421,6 +2975,23 @@ define i32 @fcmps_ugt_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwhi r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ugt_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it hi
+; CHECK-NOFULLFP16-NEXT: movhi r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1433,6 +3004,23 @@ define i32 @fcmps_uge_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwpl r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_uge_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it pl
+; CHECK-NOFULLFP16-NEXT: movpl r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1446,6 +3034,25 @@ define i32 @fcmps_ueq_f16(half %a, half %b) #0 {
; CHECK-NEXT: movweq r0, #1
; CHECK-NEXT: movwvs r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_ueq_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it eq
+; CHECK-NOFULLFP16-NEXT: moveq r0, #1
+; CHECK-NOFULLFP16-NEXT: it vs
+; CHECK-NOFULLFP16-NEXT: movvs r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
@@ -1458,18 +3065,39 @@ define i32 @fcmps_une_f16(half %a, half %b) #0 {
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: movwne r0, #1
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fcmps_une_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s1
+; CHECK-NOFULLFP16-NEXT: vmov r1, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: uxth r1, r1
+; CHECK-NOFULLFP16-NEXT: vmov s0, r1
+; CHECK-NOFULLFP16-NEXT: vmov s2, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-NOFULLFP16-NEXT: vcmpe.f32 s0, s2
+; CHECK-NOFULLFP16-NEXT: movs r0, #0
+; CHECK-NOFULLFP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFULLFP16-NEXT: it ne
+; CHECK-NOFULLFP16-NEXT: movne r0, #1
+; CHECK-NOFULLFP16-NEXT: bx lr
%cmp = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") #0
%conv = zext i1 %cmp to i32
ret i32 %conv
}
-
; Intrinsics to convert between floating-point types
define half @fptrunc_f16_f32(float %x) #0 {
; CHECK-LABEL: fptrunc_f16_f32:
; CHECK: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fptrunc_f16_f32:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %val
}
@@ -1478,82 +3106,17 @@ define float @fpext_f32_f16(half %x) #0 {
; CHECK-LABEL: fpext_f32_f16:
; CHECK: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: bx lr
+;
+; CHECK-NOFULLFP16-LABEL: fpext_f32_f16:
+; CHECK-NOFULLFP16: @ %bb.0:
+; CHECK-NOFULLFP16-NEXT: vmov r0, s0
+; CHECK-NOFULLFP16-NEXT: uxth r0, r0
+; CHECK-NOFULLFP16-NEXT: vmov s0, r0
+; CHECK-NOFULLFP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-NOFULLFP16-NEXT: bx lr
%val = call float @llvm.experimental.constrained.fpext.f32.f16(half %x, metadata !"fpexcept.strict") #0
ret float %val
}
attributes #0 = { strictfp }
-
-declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.frem.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.fmuladd.f16(half, half, half, metadata, metadata)
-declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
-declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
-declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata)
-declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata)
-declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata)
-declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata)
-declare half @llvm.experimental.constrained.sitofp.f16.i128(i128, metadata, metadata)
-declare half @llvm.experimental.constrained.uitofp.f16.i128(i128, metadata, metadata)
-declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.powi.f16(half, i32, metadata, metadata)
-declare half @llvm.experimental.constrained.sin.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.cos.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.tan.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.pow.f16(half, half, metadata, metadata)
-declare half @llvm.experimental.constrained.log.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.log10.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.log2.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.exp.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.exp2.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.i32.f16(half, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata)
-declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata)
-declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata)
-declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
-declare half @llvm.experimental.constrained.floor.f16(half, metadata)
-declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
-declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
-declare half @llvm.experimental.constrained.round.f16(half, metadata)
-declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
-declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
-
-declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)
-declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)
-
-
-declare half @llvm.sqrt.f16(half %a)
-declare half @llvm.powi.f16.i32(half %a, i32 %b)
-declare half @llvm.sin.f16(half %a)
-declare half @llvm.cos.f16(half %a)
-declare half @llvm.tan.f16(half %a)
-declare half @llvm.pow.f16(half %a, half %b)
-declare half @llvm.exp.f16(half %a)
-declare half @llvm.exp2.f16(half %a)
-declare half @llvm.log.f16(half %a)
-declare half @llvm.log10.f16(half %a)
-declare half @llvm.log2.f16(half %a)
-declare half @llvm.fma.f16(half %a, half %b, half %c)
-declare half @llvm.fabs.f16(half %a)
-declare half @llvm.minnum.f16(half %a, half %b)
-declare half @llvm.maxnum.f16(half %a, half %b)
-declare half @llvm.copysign.f16(half %a, half %b)
-declare half @llvm.floor.f16(half %a)
-declare half @llvm.ceil.f16(half %a)
-declare half @llvm.trunc.f16(half %a)
-declare half @llvm.rint.f16(half %a)
-declare half @llvm.nearbyint.f16(half %a)
-declare half @llvm.round.f16(half %a)
-declare half @llvm.roundeven.f16(half %a)
-declare half @llvm.fmuladd.f16(half %a, half %b, half %c)
More information about the llvm-commits
mailing list