[llvm] [AArch64] Add tests for vector rounding + float-to-int conversions (PR #173516)

via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 24 16:54:59 PST 2025


https://github.com/valadaptive created https://github.com/llvm/llvm-project/pull/173516

Right now we only have tests for the scalar versions of these intrinsics.

From c740066f0597c4e4cda42c73b3cdccc2f69dd6bc Mon Sep 17 00:00:00 2001
From: valadaptive <valadaptive at protonmail.com>
Date: Sat, 29 Nov 2025 18:54:40 -0500
Subject: [PATCH] [AArch64] Add tests for vector round+conversion fusion

---
 llvm/test/CodeGen/AArch64/arm64-vcvt-fptoi.ll | 1711 +++++++++++++++++
 1 file changed, 1711 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/arm64-vcvt-fptoi.ll

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt-fptoi.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt-fptoi.ll
new file mode 100644
index 0000000000000..98c328c6ae9e2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt-fptoi.ll
@@ -0,0 +1,1711 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-NO16
+; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+
+;
+; Tests for fused round + convert to int patterns (FCVTAS, FCVTAU, FCVTMS, FCVTMU, etc.)
+;
+
+;
+; round + signed -> fcvtas
+;
+
+define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtas_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtas_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtas_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtas_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtas_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; round + unsigned -> fcvtau
+;
+
+define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtau_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtau_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtau_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtau_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtau_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; roundeven + signed -> fcvtns
+;
+
+define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtns_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtns_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtns_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtns_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtns_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; roundeven + unsigned -> fcvtnu
+;
+
+define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtnu_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtnu_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtnu_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; floor + signed -> fcvtms
+;
+
+define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtms_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtms_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtms_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtms_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtms_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; floor + unsigned -> fcvtmu
+;
+
+define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtmu_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtmu_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtmu_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; ceil + signed -> fcvtps
+;
+
+define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtps_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtps_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtps_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtps_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtps_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; ceil + unsigned -> fcvtpu
+;
+
+define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtpu_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtpu_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtpu_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; trunc + signed -> fcvtzs (already the default, but test the fusion)
+;
+
+define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtzs_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzs_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtzs_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; trunc + unsigned -> fcvtzu
+;
+
+define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <2 x i32> @fcvtzu_2s_sat(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> %tmp1)
+  ret <2 x i32> %tmp2
+}
+
+
+define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzu_4s_sat(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_4s_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> %tmp1)
+  ret <4 x i32> %tmp2
+}
+
+
+define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+define <2 x i64> @fcvtzu_2d_sat(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2d_sat:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> %tmp1)
+  ret <2 x i64> %tmp2
+}
+
+
+;
+; f16 tests (require +fullfp16)
+;
+
+define <4 x i16> @fcvtas_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtas_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> %tmp1)
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtas_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtas_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    sqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> %tmp1)
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtau_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtau_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> %tmp1)
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtau_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtau_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    uqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %tmp1)
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtns_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtns_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtns
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtns_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16> ; round + signed convert: candidate for a single fcvtns
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtns_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    sqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtns
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtnu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16> ; round + unsigned convert: candidate for a single fcvtnu
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtnu_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtnu
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtnu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16> ; round + unsigned convert: candidate for a single fcvtnu
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtnu_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    uqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A) ; roundeven -> frintn (nearest, ties to even)
+  %tmp2 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtnu
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtms_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16> ; floor + signed convert: candidate for a single fcvtms
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtms_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtms
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtms_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16> ; floor + signed convert: candidate for a single fcvtms
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtms_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    sqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtms
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtmu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16> ; floor + unsigned convert: candidate for a single fcvtmu
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtmu_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtmu
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtmu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16> ; floor + unsigned convert: candidate for a single fcvtmu
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtmu_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    uqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A) ; floor -> frintm (round toward -inf)
+  %tmp2 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtmu
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtps_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16> ; ceil + signed convert: candidate for a single fcvtps
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtps_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtps
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtps_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16> ; ceil + signed convert: candidate for a single fcvtps
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtps_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    sqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating signed convert: candidate for a single fcvtps
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtpu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16> ; ceil + unsigned convert: candidate for a single fcvtpu
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtpu_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtpu
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtpu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16> ; ceil + unsigned convert: candidate for a single fcvtpu
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtpu_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    uqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A) ; ceil -> frintp (round toward +inf)
+  %tmp2 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating unsigned convert: candidate for a single fcvtpu
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtzs_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16> ; fcvtzs already rounds toward zero, so the frintz is redundant
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzs_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating fcvtzs already truncates; frintz is redundant
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtzs_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16> ; fcvtzs already rounds toward zero, so the frintz is redundant
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzs_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    sqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    sqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating fcvtzs already truncates; frintz is redundant
+  ret <8 x i16> %tmp2
+}
+
+
+define <4 x i16> @fcvtzu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16> ; fcvtzu already rounds toward zero, so the frintz is redundant
+  ret <4 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzu_4h_sat(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_4h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_4h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> %tmp1) ; saturating fcvtzu already truncates; frintz is redundant
+  ret <4 x i16> %tmp2
+}
+
+
+define <8 x i16> @fcvtzu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16> ; fcvtzu already rounds toward zero, so the frintz is redundant
+  ret <8 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzu_8h_sat(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_8h_sat:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtl2 v1.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uqxtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    uqxtn2 v0.8h, v1.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_8h_sat:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A) ; trunc -> frintz (round toward zero)
+  %tmp2 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> %tmp1) ; saturating fcvtzu already truncates; frintz is redundant
+  ret <8 x i16> %tmp2
+}



More information about the llvm-commits mailing list