[llvm] [AArch64] Optimize more floating-point round+convert combinations into fcvt instructions (PR #170018)

via llvm-commits llvm-commits at lists.llvm.org
Sat Nov 29 17:39:01 PST 2025


https://github.com/valadaptive created https://github.com/llvm/llvm-project/pull/170018

Resolves https://github.com/llvm/llvm-project/issues/170010.

This PR adds more instruction selection patterns for lowering floating-point rounding operations (ceil, floor, trunc, round, etc.) to `FCVT[AMNPZ][SU]` instructions. There are two optimizations added here, which are somewhat related:

- A `roundeven` operation followed by a float-to-int conversion can be lowered to a `FCVTNS`/`FCVTNU`.

- These optimizations, which were previously only performed on *scalar* floating-point values, are now applied to vectors as well.

An open question is whether it's legal to optimize a `rint` or `nearbyint` + conversion into a `FCVTNS`/`FCVTNU`; https://github.com/llvm/llvm-project/issues/77561 will need to be resolved first, so I've not implemented those optimizations for now.

>From dc4777e7be48c13d7fe0ff3d26d0c50a60551ed5 Mon Sep 17 00:00:00 2001
From: valadaptive <valadaptive at protonmail.com>
Date: Sat, 29 Nov 2025 18:54:23 -0500
Subject: [PATCH 1/4] [AArch64] Add tests for roundeven+conversion fusion

---
 .../CodeGen/AArch64/arm64-cvt-simd-fptoi.ll   | 372 ++++++++++++++++++
 1 file changed, 372 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
index a729772f2897a..55bc436824504 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
@@ -543,6 +543,154 @@ define double @fcvtau_dd_round_simd(double %a) {
   ret double %bc
 }
 
+define double @fcvtns_ds_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ds_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ds_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptosi float %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtns_sd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptosi double %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtns_ss_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ss_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ss_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptosi float %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptosi double %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+
+define double @fcvtnu_ds_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ds_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptoui float %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptoui double %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtnu_ss_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ss_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptoui float %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptoui double %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
 
 define double @fcvtms_ds_round_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtms_ds_round_simd:
@@ -1342,6 +1490,230 @@ define double @fcvtau_dd_simd(double %a) {
   ret double %bc
 }
 
+define float @fcvtns_sh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzs s0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
+  %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzs d0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
+  %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define double @fcvtns_ds_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ds_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ds_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i64 @llvm.fptosi.sat.i64.f32(float %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtns_sd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i32 @llvm.fptosi.sat.i32.f64(double %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtns_ss_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ss_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ss_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i32 @llvm.fptosi.sat.i32.f32(float %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i64 @llvm.fptosi.sat.i64.f64(double %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzu s0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
+  %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzu d0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
+  %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define double @fcvtnu_ds_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ds_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i64 @llvm.fptoui.sat.i64.f32(float %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i32 @llvm.fptoui.sat.i32.f64(double %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtnu_ss_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ss_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i32 @llvm.fptoui.sat.i32.f32(float %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i64 @llvm.fptoui.sat.i64.f64(double %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
 define float @fcvtms_sh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtms_sh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:

>From c4763b27468f0de5da5dfde42bebd0c796bcad28 Mon Sep 17 00:00:00 2001
From: valadaptive <valadaptive at protonmail.com>
Date: Sat, 29 Nov 2025 18:54:40 -0500
Subject: [PATCH 2/4] [AArch64] Add tests for vector round+conversion fusion

---
 .../CodeGen/AArch64/arm64-vcvt-fused-round.ll | 882 ++++++++++++++++++
 1 file changed, 882 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll
new file mode 100644
index 0000000000000..63638adbc6174
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll
@@ -0,0 +1,882 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-NO16
+; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+
+;
+; Tests for fused round + convert to int patterns (FCVTAS, FCVTAU, FCVTMS, FCVTMU, etc.)
+;
+
+;
+; round + signed -> fcvtas
+;
+
+define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtas_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; round + unsigned -> fcvtau
+;
+
+define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtau_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; roundeven + signed -> fcvtns
+;
+
+define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtns_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; roundeven + unsigned -> fcvtnu
+;
+
+define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; floor + signed -> fcvtms
+;
+
+define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtms_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; floor + unsigned -> fcvtmu
+;
+
+define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; ceil + signed -> fcvtps
+;
+
+define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtps_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; ceil + unsigned -> fcvtpu
+;
+
+define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; trunc + signed -> fcvtzs (already the default, but test the fusion)
+;
+
+define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; trunc + unsigned -> fcvtzu
+;
+
+define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; f16 tests (require +fullfp16)
+;
+
+define <4 x i16> @fcvtas_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtas_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtau_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtau_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtns_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtns_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtnu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtnu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtms_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtms_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtmu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtmu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtps_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtps_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtpu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtpu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzs_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzs_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+; Intrinsic declarations
+declare <2 x float> @llvm.round.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.round.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.round.v2f64(<2 x double>) nounwind readnone
+declare <4 x half> @llvm.round.v4f16(<4 x half>) nounwind readnone
+declare <8 x half> @llvm.round.v8f16(<8 x half>) nounwind readnone
+
+declare <2 x float> @llvm.roundeven.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.roundeven.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.roundeven.v2f64(<2 x double>) nounwind readnone
+declare <4 x half> @llvm.roundeven.v4f16(<4 x half>) nounwind readnone
+declare <8 x half> @llvm.roundeven.v8f16(<8 x half>) nounwind readnone
+
+declare <2 x float> @llvm.floor.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) nounwind readnone
+declare <4 x half> @llvm.floor.v4f16(<4 x half>) nounwind readnone
+declare <8 x half> @llvm.floor.v8f16(<8 x half>) nounwind readnone
+
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) nounwind readnone
+declare <4 x half> @llvm.ceil.v4f16(<4 x half>) nounwind readnone
+declare <8 x half> @llvm.ceil.v8f16(<8 x half>) nounwind readnone
+
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>) nounwind readnone
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) nounwind readnone
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>) nounwind readnone
+declare <4 x half> @llvm.trunc.v4f16(<4 x half>) nounwind readnone
+declare <8 x half> @llvm.trunc.v8f16(<8 x half>) nounwind readnone

>From f6c8b7803fefca2f8182605d9d1ba35b49b2c5c8 Mon Sep 17 00:00:00 2001
From: valadaptive <valadaptive at protonmail.com>
Date: Sat, 29 Nov 2025 18:58:03 -0500
Subject: [PATCH 3/4] [AArch64] Add float-to-int codegen pattern for
 roundeven+fptoi

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  18 +--
 .../CodeGen/AArch64/arm64-cvt-simd-fptoi.ll   | 120 ++++++------------
 2 files changed, 50 insertions(+), 88 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index da93a2b13fc11..e94c2e06d3594 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6817,14 +6817,16 @@ multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode to_int_sat_g
             (!cast<Instruction>(INST # v1i64) f64:$Rn)>;
 }
 
-defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fceil,  "FCVTPS">;
-defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fceil,  "FCVTPU">;
-defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, ffloor, "FCVTMS">;
-defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ffloor, "FCVTMU">;
-defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, ftrunc, "FCVTZS">;
-defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ftrunc, "FCVTZU">;
-defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fround, "FCVTAS">;
-defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fround, "FCVTAU">;
+defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fceil,      "FCVTPS">;
+defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fceil,      "FCVTPU">;
+defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, ffloor,     "FCVTMS">;
+defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ffloor,     "FCVTMU">;
+defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, ftrunc,     "FCVTZS">;
+defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ftrunc,     "FCVTZU">;
+defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fround,     "FCVTAS">;
+defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fround,     "FCVTAU">;
+defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, froundeven, "FCVTNS">;
+defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, froundeven, "FCVTNU">;
 
 // f16 -> s16 conversions
 let Predicates = [HasFullFP16] in {
diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
index 55bc436824504..48e7972b04a6c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
@@ -546,15 +546,13 @@ define double @fcvtau_dd_round_simd(double %a) {
 define double @fcvtns_ds_roundeven_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_ds_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtns x8, s0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_ds_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    fcvtns d0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = fptosi float %r to i64
@@ -565,15 +563,13 @@ define double @fcvtns_ds_roundeven_simd(float %a) {
 define float @fcvtns_sd_roundeven_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_sd_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtns w8, d0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_sd_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    fcvtns s0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = fptosi double %r to i32
@@ -584,14 +580,12 @@ define float @fcvtns_sd_roundeven_simd(double %a) {
 define float @fcvtns_ss_roundeven_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_ss_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtns s0, s0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_ss_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    fcvtns s0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = fptosi float %r to i32
@@ -602,14 +596,12 @@ define float @fcvtns_ss_roundeven_simd(float %a) {
 define double @fcvtns_dd_roundeven_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_dd_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtns d0, d0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_dd_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    fcvtns d0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = fptosi double %r to i64
@@ -621,15 +613,13 @@ define double @fcvtns_dd_roundeven_simd(double %a) {
 define double @fcvtnu_ds_roundeven_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu x8, s0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_ds_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    fcvtnu d0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = fptoui float %r to i64
@@ -640,15 +630,13 @@ define double @fcvtnu_ds_roundeven_simd(float %a) {
 define float @fcvtnu_sd_roundeven_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu w8, d0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_sd_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    fcvtnu s0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = fptoui double %r to i32
@@ -659,14 +647,12 @@ define float @fcvtnu_sd_roundeven_simd(double %a) {
 define float @fcvtnu_ss_roundeven_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu s0, s0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_ss_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    fcvtnu s0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = fptoui float %r to i32
@@ -677,14 +663,12 @@ define float @fcvtnu_ss_roundeven_simd(float %a) {
 define double @fcvtnu_dd_roundeven_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_roundeven_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu d0, d0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_dd_roundeven_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    fcvtnu d0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = fptoui double %r to i64
@@ -1493,15 +1477,13 @@ define double @fcvtau_dd_simd(double %a) {
 define float @fcvtns_sh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_sh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtns w8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_sh_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn h0, h0
-; CHECK-NEXT:    fcvtzs s0, h0
+; CHECK-NEXT:    fcvtns s0, h0
 ; CHECK-NEXT:    ret
   %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
   %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
@@ -1512,15 +1494,13 @@ define float @fcvtns_sh_simd(half %a) {
 define double @fcvtns_dh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_dh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtns x8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_dh_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn h0, h0
-; CHECK-NEXT:    fcvtzs d0, h0
+; CHECK-NEXT:    fcvtns d0, h0
 ; CHECK-NEXT:    ret
   %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
   %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
@@ -1531,15 +1511,13 @@ define double @fcvtns_dh_simd(half %a) {
 define double @fcvtns_ds_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_ds_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtns x8, s0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_ds_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    fcvtns d0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = call i64 @llvm.fptosi.sat.i64.f32(float %r)
@@ -1550,15 +1528,13 @@ define double @fcvtns_ds_simd(float %a) {
 define float @fcvtns_sd_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_sd_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtns w8, d0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_sd_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    fcvtns s0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = call i32 @llvm.fptosi.sat.i32.f64(double %r)
@@ -1569,14 +1545,12 @@ define float @fcvtns_sd_simd(double %a) {
 define float @fcvtns_ss_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_ss_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtns s0, s0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_ss_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    fcvtns s0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = call i32 @llvm.fptosi.sat.i32.f32(float %r)
@@ -1587,14 +1561,12 @@ define float @fcvtns_ss_simd(float %a) {
 define double @fcvtns_dd_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtns_dd_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtns d0, d0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtns_dd_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    fcvtns d0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = call i64 @llvm.fptosi.sat.i64.f64(double %r)
@@ -1605,15 +1577,13 @@ define double @fcvtns_dd_simd(double %a) {
 define float @fcvtnu_sh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_sh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu w8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_sh_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn h0, h0
-; CHECK-NEXT:    fcvtzu s0, h0
+; CHECK-NEXT:    fcvtnu s0, h0
 ; CHECK-NEXT:    ret
   %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
   %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
@@ -1624,15 +1594,13 @@ define float @fcvtnu_sh_simd(half %a) {
 define double @fcvtnu_dh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_dh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu x8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_dh_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn h0, h0
-; CHECK-NEXT:    fcvtzu d0, h0
+; CHECK-NEXT:    fcvtnu d0, h0
 ; CHECK-NEXT:    ret
   %r = call half @llvm.roundeven.f16(half %a) nounwind readnone
   %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
@@ -1643,15 +1611,13 @@ define double @fcvtnu_dh_simd(half %a) {
 define double @fcvtnu_ds_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu x8, s0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_ds_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    fcvtnu d0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = call i64 @llvm.fptoui.sat.i64.f32(float %r)
@@ -1662,15 +1628,13 @@ define double @fcvtnu_ds_simd(float %a) {
 define float @fcvtnu_sd_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu w8, d0
 ; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_sd_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    fcvtnu s0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = call i32 @llvm.fptoui.sat.i32.f64(double %r)
@@ -1681,14 +1645,12 @@ define float @fcvtnu_sd_simd(double %a) {
 define float @fcvtnu_ss_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu s0, s0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_ss_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn s0, s0
-; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    fcvtnu s0, s0
 ; CHECK-NEXT:    ret
   %r = call float @llvm.roundeven.f32(float %a)
   %i = call i32 @llvm.fptoui.sat.i32.f32(float %r)
@@ -1699,14 +1661,12 @@ define float @fcvtnu_ss_simd(float %a) {
 define double @fcvtnu_dd_simd(double %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
-; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
-; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtnu d0, d0
 ; CHECK-NOFPRCVT-NEXT:    ret
 ;
 ; CHECK-LABEL: fcvtnu_dd_simd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn d0, d0
-; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    fcvtnu d0, d0
 ; CHECK-NEXT:    ret
   %r = call double @llvm.roundeven.f64(double %a)
   %i = call i64 @llvm.fptoui.sat.i64.f64(double %r)

>From d5b88fc7716052ea3dd5507d68b9db80d59bddaa Mon Sep 17 00:00:00 2001
From: valadaptive <valadaptive at protonmail.com>
Date: Sat, 29 Nov 2025 19:59:53 -0500
Subject: [PATCH 4/4] [AArch64] Use vector rounding conversion instructions

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  27 ++++
 .../CodeGen/AArch64/arm64-vcvt-fused-round.ll | 130 ++++++------------
 llvm/test/CodeGen/AArch64/shuffle-tbl34.ll    |  26 ++--
 3 files changed, 76 insertions(+), 107 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index e94c2e06d3594..02ae9546f7ccf 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -5830,6 +5830,33 @@ multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, SDNode to_int_sat_gi,
 defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, fp_to_sint_sat_gi, "FCVTZS">;
 defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, fp_to_uint_sat_gi, "FCVTZU">;
 
+// Fused round + convert to int patterns for vectors
+multiclass SIMDTwoVectorFPToIntRoundPats<SDNode to_int, SDNode round, string INST> {
+  let Predicates = [HasFullFP16] in {
+  def : Pat<(v4i16 (to_int (round v4f16:$Rn))),
+            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
+  def : Pat<(v8i16 (to_int (round v8f16:$Rn))),
+            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
+  }
+  def : Pat<(v2i32 (to_int (round v2f32:$Rn))),
+            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
+  def : Pat<(v4i32 (to_int (round v4f32:$Rn))),
+            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
+  def : Pat<(v2i64 (to_int (round v2f64:$Rn))),
+            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
+}
+
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_sint, fceil,      "FCVTPS">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_uint, fceil,      "FCVTPU">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_sint, ffloor,     "FCVTMS">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_uint, ffloor,     "FCVTMU">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_sint, ftrunc,     "FCVTZS">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_uint, ftrunc,     "FCVTZU">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_sint, fround,     "FCVTAS">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_uint, fround,     "FCVTAU">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_sint, froundeven, "FCVTNS">;
+defm : SIMDTwoVectorFPToIntRoundPats<fp_to_uint, froundeven, "FCVTNU">;
+
 def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
 def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
 def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll
index 63638adbc6174..5a2bc3bb80225 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt-fused-round.ll
@@ -13,8 +13,7 @@
 define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtas_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    fcvtas v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
   %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
@@ -24,8 +23,7 @@ define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtas_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    fcvtas v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
   %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
@@ -35,8 +33,7 @@ define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtas_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtas v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
   %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
@@ -50,8 +47,7 @@ define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtau_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    fcvtau v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
   %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
@@ -61,8 +57,7 @@ define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtau_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    fcvtau v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
   %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
@@ -72,8 +67,7 @@ define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtau_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    fcvtau v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
   %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
@@ -87,8 +81,7 @@ define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtns_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    fcvtns v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
   %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
@@ -98,8 +91,7 @@ define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtns_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    fcvtns v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
   %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
@@ -109,8 +101,7 @@ define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtns_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtns v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
   %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
@@ -124,8 +115,7 @@ define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtnu_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    fcvtnu v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
   %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
@@ -135,8 +125,7 @@ define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtnu_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    fcvtnu v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
   %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
@@ -146,8 +135,7 @@ define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtnu_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    fcvtnu v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
   %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
@@ -161,8 +149,7 @@ define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtms_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    fcvtms v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
   %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
@@ -172,8 +159,7 @@ define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtms_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    fcvtms v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
   %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
@@ -183,8 +169,7 @@ define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtms_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtms v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
   %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
@@ -198,8 +183,7 @@ define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtmu_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    fcvtmu v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
   %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
@@ -209,8 +193,7 @@ define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtmu_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    fcvtmu v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
   %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
@@ -220,8 +203,7 @@ define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtmu_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    fcvtmu v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
   %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
@@ -235,8 +217,7 @@ define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtps_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    fcvtps v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
   %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
@@ -246,8 +227,7 @@ define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtps_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    fcvtps v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
   %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
@@ -257,8 +237,7 @@ define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtps_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    fcvtps v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
   %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
@@ -272,8 +251,7 @@ define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtpu_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2s, v0.2s
-; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    fcvtpu v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
   %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
@@ -283,8 +261,7 @@ define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtpu_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.4s, v0.4s
-; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    fcvtpu v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
   %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
@@ -294,8 +271,7 @@ define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtpu_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    fcvtpu v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
   %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
@@ -309,7 +285,6 @@ define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtzs_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2s, v0.2s
 ; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
@@ -320,7 +295,6 @@ define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtzs_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.4s, v0.4s
 ; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
@@ -331,7 +305,6 @@ define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtzs_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2d, v0.2d
 ; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
@@ -346,7 +319,6 @@ define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
 define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtzu_2s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2s, v0.2s
 ; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
@@ -357,7 +329,6 @@ define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
 define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
 ; CHECK-LABEL: fcvtzu_4s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.4s, v0.4s
 ; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
 ; CHECK-NEXT:    ret
   %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
@@ -368,7 +339,6 @@ define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
 define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
 ; CHECK-LABEL: fcvtzu_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2d, v0.2d
 ; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
@@ -393,8 +363,7 @@ define <4 x i16> @fcvtas_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtas_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtas v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
   %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
@@ -419,8 +388,7 @@ define <8 x i16> @fcvtas_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtas_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtas v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
   %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
@@ -440,8 +408,7 @@ define <4 x i16> @fcvtau_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtau_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frinta v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtau v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
   %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
@@ -466,8 +433,7 @@ define <8 x i16> @fcvtau_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtau_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frinta v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtau v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
   %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
@@ -487,8 +453,7 @@ define <4 x i16> @fcvtns_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtns_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtns v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
   %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
@@ -513,8 +478,7 @@ define <8 x i16> @fcvtns_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtns_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtns v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
   %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
@@ -534,8 +498,7 @@ define <4 x i16> @fcvtnu_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtnu_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintn v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtnu v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
   %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
@@ -560,8 +523,7 @@ define <8 x i16> @fcvtnu_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtnu_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintn v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtnu v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
   %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
@@ -581,8 +543,7 @@ define <4 x i16> @fcvtms_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtms_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtms v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
   %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
@@ -607,8 +568,7 @@ define <8 x i16> @fcvtms_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtms_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtms v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
   %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
@@ -628,8 +588,7 @@ define <4 x i16> @fcvtmu_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtmu_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintm v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtmu v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
   %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
@@ -654,8 +613,7 @@ define <8 x i16> @fcvtmu_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtmu_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintm v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtmu v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
   %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
@@ -675,8 +633,7 @@ define <4 x i16> @fcvtps_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtps_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtps v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
   %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
@@ -701,8 +658,7 @@ define <8 x i16> @fcvtps_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtps_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtps v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
   %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
@@ -722,8 +678,7 @@ define <4 x i16> @fcvtpu_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtpu_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintp v0.4h, v0.4h
-; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    fcvtpu v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
   %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
@@ -748,8 +703,7 @@ define <8 x i16> @fcvtpu_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtpu_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintp v0.8h, v0.8h
-; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    fcvtpu v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
   %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
@@ -769,7 +723,6 @@ define <4 x i16> @fcvtzs_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtzs_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
@@ -795,7 +748,6 @@ define <8 x i16> @fcvtzs_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtzs_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
@@ -816,7 +768,6 @@ define <4 x i16> @fcvtzu_4h(<4 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtzu_4h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintz v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
@@ -842,7 +793,6 @@ define <8 x i16> @fcvtzu_8h(<8 x half> %A) nounwind {
 ;
 ; CHECK-FP16-LABEL: fcvtzu_8h:
 ; CHECK-FP16:       // %bb.0:
-; CHECK-FP16-NEXT:    frintz v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
 ; CHECK-FP16-NEXT:    ret
   %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
diff --git a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
index fb571eff39fe5..9f4f00fda7cdf 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
 ; CHECK: .LCPI0_0:
@@ -700,23 +700,15 @@ define <16 x i8> @insert4_v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8
 define <16 x i16> @test(<2 x double> %l213, <2 x double> %l231, <2 x double> %l249, <2 x double> %l267, <2 x double> %l285, <2 x double> %l303, <2 x double> %l321, <2 x double> %l339) {
 ; CHECK-LABEL: test:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2d, v0.2d
-; CHECK-NEXT:    frintm v4.2d, v4.2d
+; CHECK-NEXT:    fcvtms v0.2d, v0.2d
+; CHECK-NEXT:    fcvtms v4.2d, v4.2d
 ; CHECK-NEXT:    adrp x8, .LCPI16_0
-; CHECK-NEXT:    frintm v1.2d, v1.2d
-; CHECK-NEXT:    frintm v5.2d, v5.2d
-; CHECK-NEXT:    frintm v2.2d, v2.2d
-; CHECK-NEXT:    frintm v6.2d, v6.2d
-; CHECK-NEXT:    frintm v3.2d, v3.2d
-; CHECK-NEXT:    frintm v7.2d, v7.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
-; CHECK-NEXT:    fcvtzs v4.2d, v4.2d
-; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
-; CHECK-NEXT:    fcvtzs v5.2d, v5.2d
-; CHECK-NEXT:    fcvtzs v2.2d, v2.2d
-; CHECK-NEXT:    fcvtzs v6.2d, v6.2d
-; CHECK-NEXT:    fcvtzs v3.2d, v3.2d
-; CHECK-NEXT:    fcvtzs v7.2d, v7.2d
+; CHECK-NEXT:    fcvtms v1.2d, v1.2d
+; CHECK-NEXT:    fcvtms v5.2d, v5.2d
+; CHECK-NEXT:    fcvtms v2.2d, v2.2d
+; CHECK-NEXT:    fcvtms v6.2d, v6.2d
+; CHECK-NEXT:    fcvtms v3.2d, v3.2d
+; CHECK-NEXT:    fcvtms v7.2d, v7.2d
 ; CHECK-NEXT:    xtn v16.2s, v0.2d
 ; CHECK-NEXT:    xtn v20.2s, v4.2d
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI16_0]



More information about the llvm-commits mailing list