[llvm] [AArch64] Optimize more floating-point round+convert combinations into fcvt instructions (PR #170018)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 2 08:58:09 PST 2025


================
@@ -0,0 +1,832 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-NO16
+; RUN: llc < %s -mtriple=arm64-eabi -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
+
+;
+; Tests for fused round + convert to int patterns (FCVTAS, FCVTAU, FCVTMS, FCVTMU, etc.)
+;
+
+;
+; round + signed -> fcvtas
+;
+
+define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtas_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtas_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtas_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtas_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; round + unsigned -> fcvtau
+;
+
+define <2 x i32> @fcvtau_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtau_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtau_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtau_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtau_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtau v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.round.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; roundeven + signed -> fcvtns
+;
+
+define <2 x i32> @fcvtns_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtns_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtns_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtns_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtns_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtns v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; roundeven + unsigned -> fcvtnu
+;
+
+define <2 x i32> @fcvtnu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtnu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtnu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtnu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtnu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtnu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; floor + signed -> fcvtms
+;
+
+define <2 x i32> @fcvtms_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtms_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtms_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtms_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtms_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtms v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; floor + unsigned -> fcvtmu
+;
+
+define <2 x i32> @fcvtmu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.floor.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtmu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtmu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtmu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtmu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtmu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.floor.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; ceil + signed -> fcvtps
+;
+
+define <2 x i32> @fcvtps_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtps_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtps_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtps_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtps_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtps v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; ceil + unsigned -> fcvtpu
+;
+
+define <2 x i32> @fcvtpu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.ceil.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtpu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtpu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtpu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtpu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtpu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; trunc + signed -> fcvtzs (already the default, but test the fusion)
+;
+
+define <2 x i32> @fcvtzs_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzs_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzs_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtzs_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzs_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptosi <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; trunc + unsigned -> fcvtzu
+;
+
+define <2 x i32> @fcvtzu_2s(<2 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x float> @llvm.trunc.v2f32(<2 x float> %A)
+  %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
+  ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @fcvtzu_4s(<4 x float> %A) nounwind {
+; CHECK-LABEL: fcvtzu_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %tmp1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %A)
+  %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
+  ret <4 x i32> %tmp2
+}
+
+define <2 x i64> @fcvtzu_2d(<2 x double> %A) nounwind {
+; CHECK-LABEL: fcvtzu_2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %tmp1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %A)
+  %tmp2 = fptoui <2 x double> %tmp1 to <2 x i64>
+  ret <2 x i64> %tmp2
+}
+
+;
+; f16 tests (require +fullfp16)
+;
+
+define <4 x i16> @fcvtas_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtas v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtas_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtas_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtas_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtas v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtau_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtau v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtau_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtau_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtau_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtau v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtns_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtns v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtns_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtns_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtns_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtns v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtnu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtnu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtnu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtnu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtnu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtnu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtms_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtms v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtms_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtms_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtms_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtms v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtmu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtmu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.floor.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtmu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtmu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtmu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtmu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.floor.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtps_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtps v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtps_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtps_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtps_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtps v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtpu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtpu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.ceil.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtpu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtpu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtpu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtpu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.ceil.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzs_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtzs v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
+  %tmp2 = fptosi <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzs_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzs_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzs v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzs v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzs_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtzs v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
+  %tmp2 = fptosi <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+define <4 x i16> @fcvtzu_4h(<4 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_4h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_4h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtzu v0.4h, v0.4h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <4 x half> @llvm.trunc.v4f16(<4 x half> %A)
+  %tmp2 = fptoui <4 x half> %tmp1 to <4 x i16>
+  ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @fcvtzu_8h(<8 x half> %A) nounwind {
+; CHECK-NO16-LABEL: fcvtzu_8h:
+; CHECK-NO16:       // %bb.0:
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NO16-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NO16-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtn v1.4h, v1.4s
+; CHECK-NO16-NEXT:    fcvtn2 v1.8h, v0.4s
+; CHECK-NO16-NEXT:    fcvtl2 v0.4s, v1.8h
+; CHECK-NO16-NEXT:    fcvtl v1.4s, v1.4h
+; CHECK-NO16-NEXT:    fcvtzu v0.4s, v0.4s
+; CHECK-NO16-NEXT:    fcvtzu v1.4s, v1.4s
+; CHECK-NO16-NEXT:    uzp1 v0.8h, v1.8h, v0.8h
+; CHECK-NO16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: fcvtzu_8h:
+; CHECK-FP16:       // %bb.0:
+; CHECK-FP16-NEXT:    fcvtzu v0.8h, v0.8h
+; CHECK-FP16-NEXT:    ret
+  %tmp1 = call <8 x half> @llvm.trunc.v8f16(<8 x half> %A)
+  %tmp2 = fptoui <8 x half> %tmp1 to <8 x i16>
+  ret <8 x i16> %tmp2
+}
+
+; Intrinsic declarations
+declare <2 x float> @llvm.round.v2f32(<2 x float>) nounwind readnone
----------------
Lukacma wrote:

I don't think these are necessary anymore.
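
For context (my understanding of why these can go): the IR parser now materializes intrinsic declarations on demand, so each test function stands on its own without trailing `declare` lines. A minimal sketch, assuming that parser behavior; the function name here is made up:

```llvm
; Assumes the IR parser's auto-declare behavior: no declare for
; @llvm.round.v2f32 is needed, it is materialized at the first call site.
define <2 x i32> @fcvtas_2s_no_declare(<2 x float> %A) {
  %r = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
  %c = fptosi <2 x float> %r to <2 x i32>
  ret <2 x i32> %c
}
```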

https://github.com/llvm/llvm-project/pull/170018

