[llvm] [AArch64][GlobalISel] Add codegen for simd fpcvt intrinsics (PR #157680)
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 10 13:04:55 PDT 2025
================
@@ -0,0 +1,612 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -global-isel -global-isel-abort=2 -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+
+;
+; Intrinsics
+;
+
+define float @fcvtas_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtas_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtas_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtas_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtas_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtas_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtas_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtas_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtas_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtas_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtas_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtas_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtas s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+
+define float @fcvtau_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtau_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtau_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtau_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtau_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtau_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtau_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtau_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtau_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtau_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtau_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtau_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtau s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtms_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtms_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtms_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtms_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtms_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtms_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtms_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtms_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtms_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtms_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtms_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtms_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtms s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtmu_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtmu_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtmu_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtmu_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtmu_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtmu_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtmu_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtmu_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtmu_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtmu_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtmu_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtmu_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtmu s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtns_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtns_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtns_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtns_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtns_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtns_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtns_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtns_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtns_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtns_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtns_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtns_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtns s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtnu_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtnu_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtnu_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtnu_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtnu_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtnu_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtnu_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtnu_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtnu_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtnu_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtnu_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtnu_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtnu s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtps_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtps_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtps_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtps_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtps_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtps_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtps_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtps_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtps_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtps_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtps_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtps_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtps s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtpu_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtpu_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtpu_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtpu_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtpu_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtpu_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtpu_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtpu_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtpu_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtpu_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtpu_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtpu_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtpu s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtzs_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtzs_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtzs_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzs_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtzs_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtzs_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtzs_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtzs_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtzs_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtzs_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtzs_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtzs_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzs s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+
+define float @fcvtzu_1s1d_simd(double %A) nounwind {
+; CHECK-LABEL: fcvtzu_1s1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu s0, d0
+; CHECK-NEXT: ret
+ %i = call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double %A)
+ %f = bitcast i32 %i to float
+ ret float %f
+}
+
+define double @fcvtzu_1d1s_simd(float %A) nounwind {
+; CHECK-LABEL: fcvtzu_1d1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu d0, s0
+; CHECK-NEXT: ret
+ %i = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float %A)
+ %d = bitcast i64 %i to double
+ ret double %d
+}
+
+define dso_local float @fcvtzu_1s1h_simd(half %a) {
+; CHECK-LABEL: fcvtzu_1s1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu s0, h0
+; CHECK-NEXT: ret
+ %fcvt = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
+ %f = bitcast i32 %fcvt to float
+ ret float %f
+}
+
+define dso_local double @fcvtzu_1d1h_simd(half %a) {
+; CHECK-LABEL: fcvtzu_1d1h_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu d0, h0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a)
+ %d = bitcast i64 %vcvtah_s64_f16 to double
+ ret double %d
+}
+
+define dso_local double @fcvtzu_1d1d_simd(double %a) {
+; CHECK-LABEL: fcvtzu_1d1d_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu d0, d0
+; CHECK-NEXT: ret
+ %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %a)
+ %d = bitcast i64 %vcvtah_s64_f64 to double
+ ret double %d
+}
+
+define dso_local float @fcvtzu_1s1s_simd(float %a) {
+; CHECK-LABEL: fcvtzu_1s1s_simd:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fcvtzu s0, s0
+; CHECK-NEXT: ret
+ %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %a)
+ %d = bitcast i32 %vcvtah_s32_f32 to float
+ ret float %d
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-GI: {{.*}}
+; CHECK-SD: {{.*}}
----------------
aemerson wrote:
If the output matches completely, let's just remove these and use only `CHECK`.
https://github.com/llvm/llvm-project/pull/157680
More information about the llvm-commits
mailing list