[llvm] Fix legalizing `FNEG` and `FABS` with `TypeSoftPromoteHalf` (PR #156343)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 1 08:22:58 PDT 2025
https://github.com/beetrees created https://github.com/llvm/llvm-project/pull/156343
Based on #156341.
`FNEG` and `FABS` must preserve signalling NaNs, meaning they should not be converted to f32 to perform the operation. Instead, legalize them to `XOR` and `AND`.
Fixes #104915
From f3d297d8385bcb48d8d2d312ebcf7b94d82a3ef7 Mon Sep 17 00:00:00 2001
From: beetrees <b at beetr.ee>
Date: Mon, 1 Sep 2025 15:04:17 +0100
Subject: [PATCH 1/2] [ARM] Improve fp16-promote.ll test (NFC)
---
llvm/test/CodeGen/ARM/fp16-promote.ll | 2391 ++++++++++++++++++++-----
1 file changed, 1941 insertions(+), 450 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll
index 1bd01508808c8..800ee87b95ca8 100644
--- a/llvm/test/CodeGen/ARM/fp16-promote.ll
+++ b/llvm/test/CodeGen/ARM/fp16-promote.ll
@@ -1,20 +1,59 @@
-; RUN: llc -asm-verbose=false < %s -mattr=+vfp3,+fp16 | FileCheck -allow-deprecated-dag-overlap %s -check-prefix=CHECK-FP16 --check-prefix=CHECK-VFP -check-prefix=CHECK-ALL
-; RUN: llc -asm-verbose=false < %s | FileCheck -allow-deprecated-dag-overlap %s -check-prefix=CHECK-LIBCALL --check-prefix=CHECK-VFP -check-prefix=CHECK-ALL --check-prefix=CHECK-LIBCALL-VFP
-; RUN: llc -asm-verbose=false < %s -mattr=-fpregs | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK-LIBCALL -check-prefix=CHECK-NOVFP -check-prefix=CHECK-ALL
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -asm-verbose=false < %s -mattr=+vfp3,+fp16 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-VFP,CHECK-FP16
+; RUN: llc -asm-verbose=false < %s | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-LIBCALL,CHECK-VFP,CHECK-LIBCALL-VFP
+; RUN: llc -asm-verbose=false < %s -mattr=-fpregs | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-LIBCALL,CHECK-NOVFP
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "armv7---eabihf"
define void @test_fadd(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fadd:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vadd.f32
-; CHECK-NOVFP: bl __aeabi_fadd
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_fadd:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fadd:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vadd.f32 s0, s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fadd:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fadd
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fadd half %a, %b
@@ -23,15 +62,53 @@ define void @test_fadd(ptr %p, ptr %q) #0 {
}
define void @test_fsub(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fsub:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vsub.f32
-; CHECK-NOVFP: bl __aeabi_fsub
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_fsub:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vsub.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fsub:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vsub.f32 s0, s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fsub:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fsub
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fsub half %a, %b
@@ -40,15 +117,53 @@ define void @test_fsub(ptr %p, ptr %q) #0 {
}
define void @test_fmul(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fmul:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vmul.f32
-; CHECK-NOVFP: bl __aeabi_fmul
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_fmul:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vmul.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fmul:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmul.f32 s0, s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fmul:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fmul
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fmul half %a, %b
@@ -57,15 +172,53 @@ define void @test_fmul(ptr %p, ptr %q) #0 {
}
define void @test_fdiv(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fdiv:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vdiv.f32
-; CHECK-NOVFP: bl __aeabi_fdiv
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_fdiv:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vdiv.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fdiv:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vdiv.f32 s0, s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fdiv:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fdiv
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fdiv half %a, %b
@@ -74,14 +227,53 @@ define void @test_fdiv(ptr %p, ptr %q) #0 {
}
define void @test_frem(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_frem:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl fmodf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_frem:
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-FP16-NEXT: bl fmodf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_frem:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s1, r5
+; CHECK-LIBCALL-VFP-NEXT: bl fmodf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_frem:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl fmodf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = frem half %a, %b
@@ -91,9 +283,9 @@ define void @test_frem(ptr %p, ptr %q) #0 {
define void @test_load_store(ptr %p, ptr %q) #0 {
; CHECK-ALL-LABEL: test_load_store:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-ALL: ldrh {{r[0-9]+}}, [{{r[0-9]+}}]
-; CHECK-ALL: strh {{r[0-9]+}}, [{{r[0-9]+}}]
+; CHECK-ALL: ldrh r0, [r0]
+; CHECK-ALL-NEXT: strh r0, [r1]
+; CHECK-ALL-NEXT: bx lr
%a = load half, ptr %p, align 2
store half %a, ptr %q
ret void
@@ -106,42 +298,48 @@ declare half @test_callee(half %a, half %b) #0
define half @test_call(half %a, half %b) #0 {
; CHECK-ALL-LABEL: test_call:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-ALL-NEXT: .save {r11, lr}
-; CHECK-ALL-NEXT: push {r11, lr}
-; CHECK-ALL-NEXT: bl test_callee
-; CHECK-ALL-NEXT: pop {r11, pc}
+; CHECK-ALL: .save {r11, lr}
+; CHECK-ALL-NEXT: push {r11, lr}
+; CHECK-ALL-NEXT: bl test_callee
+; CHECK-ALL-NEXT: pop {r11, pc}
%r = call half @test_callee(half %a, half %b)
ret half %r
}
define half @test_call_flipped(half %a, half %b) #0 {
-; CHECK-ALL-LABEL: test_call_flipped:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-ALL-NEXT: .save {r11, lr}
-; CHECK-ALL-NEXT: push {r11, lr}
-; CHECK-VFP-NEXT: vmov.f32 s2, s0
-; CHECK-VFP-NEXT: vmov.f32 s0, s1
-; CHECK-VFP-NEXT: vmov.f32 s1, s2
-; CHECK-NOVFP-NEXT: mov r2, r0
-; CHECK-NOVFP-NEXT: mov r0, r1
-; CHECK-NOVFP-NEXT: mov r1, r2
-; CHECK-ALL-NEXT: bl test_callee
-; CHECK-ALL-NEXT: pop {r11, pc}
+; CHECK-VFP-LABEL: test_call_flipped:
+; CHECK-VFP: .save {r11, lr}
+; CHECK-VFP-NEXT: push {r11, lr}
+; CHECK-VFP-NEXT: vmov.f32 s2, s0
+; CHECK-VFP-NEXT: vmov.f32 s0, s1
+; CHECK-VFP-NEXT: vmov.f32 s1, s2
+; CHECK-VFP-NEXT: bl test_callee
+; CHECK-VFP-NEXT: pop {r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_call_flipped:
+; CHECK-NOVFP: .save {r11, lr}
+; CHECK-NOVFP-NEXT: push {r11, lr}
+; CHECK-NOVFP-NEXT: mov r2, r0
+; CHECK-NOVFP-NEXT: mov r0, r1
+; CHECK-NOVFP-NEXT: mov r1, r2
+; CHECK-NOVFP-NEXT: bl test_callee
+; CHECK-NOVFP-NEXT: pop {r11, pc}
%r = call half @test_callee(half %b, half %a)
ret half %r
}
define half @test_tailcall_flipped(half %a, half %b) #0 {
-; CHECK-ALL-LABEL: test_tailcall_flipped:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-VFP-NEXT: vmov.f32 s2, s0
-; CHECK-VFP-NEXT: vmov.f32 s0, s1
-; CHECK-VFP-NEXT: vmov.f32 s1, s2
-; CHECK-NOVFP-NEXT: mov r2, r0
-; CHECK-NOVFP-NEXT: mov r0, r1
-; CHECK-NOVFP-NEXT: mov r1, r2
-; CHECK-ALL-NEXT: b test_callee
+; CHECK-VFP-LABEL: test_tailcall_flipped:
+; CHECK-VFP: vmov.f32 s2, s0
+; CHECK-VFP-NEXT: vmov.f32 s0, s1
+; CHECK-VFP-NEXT: vmov.f32 s1, s2
+; CHECK-VFP-NEXT: b test_callee
+;
+; CHECK-NOVFP-LABEL: test_tailcall_flipped:
+; CHECK-NOVFP: mov r2, r0
+; CHECK-NOVFP-NEXT: mov r0, r1
+; CHECK-NOVFP-NEXT: mov r1, r2
+; CHECK-NOVFP-NEXT: b test_callee
%r = tail call half @test_callee(half %b, half %a)
ret half %r
}
@@ -150,10 +348,11 @@ define half @test_tailcall_flipped(half %a, half %b) #0 {
; No conversion is needed
define void @test_select(ptr %p, ptr %q, i1 zeroext %c) #0 {
; CHECK-ALL-LABEL: test_select:
-; CHECK-ALL: cmp {{r[0-9]+}}, #0
-; CHECK-ALL: movne {{r[0-9]+}}, {{r[0-9]+}}
-; CHECK-ALL: ldrh {{r[0-9]+}}, [{{r[0-9]+}}]
-; CHECK-ALL: strh {{r[0-9]+}}, [{{r[0-9]+}}]
+; CHECK-ALL: cmp r2, #0
+; CHECK-ALL-NEXT: movne r1, r0
+; CHECK-ALL-NEXT: ldrh r1, [r1]
+; CHECK-ALL-NEXT: strh r1, [r0]
+; CHECK-ALL-NEXT: bx lr
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = select i1 %c, half %a, half %b
@@ -164,17 +363,53 @@ define void @test_select(ptr %p, ptr %q, i1 zeroext %c) #0 {
; Test only two variants of fcmp. These get translated to f32 vcmp
; instructions anyway.
define i1 @test_fcmp_une(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fcmp_une:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmp.f32
-; CHECK-NOVFP: bl __aeabi_fcmpeq
-; CHECK-VFP-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-VFP-NEXT: movwne
-; CHECK-NOVFP-NEXT: clz r0, r0
-; CHECK-NOVFP-NEXT: lsr r0, r0, #5
+; CHECK-FP16-LABEL: test_fcmp_une:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: mov r0, #0
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcmp.f32 s0, s2
+; CHECK-FP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP16-NEXT: movwne r0, #1
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fcmp_une:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, #0
+; CHECK-LIBCALL-VFP-NEXT: vcmp.f32 s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-LIBCALL-VFP-NEXT: movwne r0, #1
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fcmp_une:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmpeq
+; CHECK-NOVFP-NEXT: clz r0, r0
+; CHECK-NOVFP-NEXT: lsr r0, r0, #5
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fcmp une half %a, %b
@@ -182,15 +417,60 @@ define i1 @test_fcmp_une(ptr %p, ptr %q) #0 {
}
define i1 @test_fcmp_ueq(ptr %p, ptr %q) #0 {
-; CHECK-ALL-LABEL: test_fcmp_ueq:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmp.f32
-; CHECK-NOVFP: bl __aeabi_fcmpeq
-; CHECK-FP16: vmrs APSR_nzcv, fpscr
-; CHECK-LIBCALL: movw{{ne|eq}}
+; CHECK-FP16-LABEL: test_fcmp_ueq:
+; CHECK-FP16: ldrh r2, [r0]
+; CHECK-FP16-NEXT: mov r0, #0
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r2
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcmp.f32 s0, s2
+; CHECK-FP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP16-NEXT: movweq r0, #1
+; CHECK-FP16-NEXT: movwvs r0, #1
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fcmp_ueq:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, #0
+; CHECK-LIBCALL-VFP-NEXT: vcmp.f32 s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-LIBCALL-VFP-NEXT: movweq r0, #1
+; CHECK-LIBCALL-VFP-NEXT: movwvs r0, #1
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fcmp_ueq:
+; CHECK-NOVFP: .save {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmpeq
+; CHECK-NOVFP-NEXT: mov r6, r0
+; CHECK-NOVFP-NEXT: mov r0, r4
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmpun
+; CHECK-NOVFP-NEXT: orrs r0, r0, r6
+; CHECK-NOVFP-NEXT: movwne r0, #1
+; CHECK-NOVFP-NEXT: pop {r4, r5, r6, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = fcmp ueq half %a, %b
@@ -198,17 +478,61 @@ define i1 @test_fcmp_ueq(ptr %p, ptr %q) #0 {
}
define void @test_br_cc(ptr %p, ptr %q, ptr %p1, ptr %p2) #0 {
-; CHECK-ALL-LABEL: test_br_cc:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmp.f32
-; CHECK-NOVFP: bl __aeabi_fcmplt
-; CHECK-FP16: vmrs APSR_nzcv, fpscr
-; CHECK-VFP: movmi
-; CHECK-VFP: str
-; CHECK-NOVFP: str
+; CHECK-FP16-LABEL: test_br_cc:
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: mov r0, #0
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcmp.f32 s0, s2
+; CHECK-FP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP16-NEXT: movmi r2, r3
+; CHECK-FP16-NEXT: str r0, [r2]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_br_cc:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r6, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r3
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r2
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r6]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, #0
+; CHECK-LIBCALL-VFP-NEXT: vcmp.f32 s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-LIBCALL-VFP-NEXT: movmi r5, r4
+; CHECK-LIBCALL-VFP-NEXT: str r0, [r5]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r6, pc}
+;
+; CHECK-NOVFP-LABEL: test_br_cc:
+; CHECK-NOVFP: .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-NOVFP-NEXT: mov r6, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: mov r4, r3
+; CHECK-NOVFP-NEXT: mov r5, r2
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r7, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r6]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r7
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmplt
+; CHECK-NOVFP-NEXT: cmp r0, #0
+; CHECK-NOVFP-NEXT: mov r0, #0
+; CHECK-NOVFP-NEXT: movne r5, r4
+; CHECK-NOVFP-NEXT: str r0, [r5]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r6, r7, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%c = fcmp uge half %a, %b
@@ -222,14 +546,37 @@ else:
}
declare i1 @test_dummy(ptr %p) #0
-; CHECK-ALL-LABEL: test_phi:
-; CHECK-FP16: [[LOOP:.LBB[0-9_]+]]:
-; CHECK-FP16: bl test_dummy
-; CHECK-FP16: bne [[LOOP]]
-; CHECK-LIBCALL: [[LOOP:.LBB[0-9_]+]]:
-; CHECK-LIBCALL: bl test_dummy
-; CHECK-LIBCALL: bne [[LOOP]]
+
define void @test_phi(ptr %p) #0 {
+; CHECK-VFP-LABEL: test_phi:
+; CHECK-VFP: .save {r4, r5, r6, lr}
+; CHECK-VFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-VFP-NEXT: ldrh r6, [r0]
+; CHECK-VFP-NEXT: mov r4, r0
+; CHECK-VFP-NEXT: .LBB13_1:
+; CHECK-VFP-NEXT: mov r0, r4
+; CHECK-VFP-NEXT: mov r5, r6
+; CHECK-VFP-NEXT: ldrh r6, [r4]
+; CHECK-VFP-NEXT: bl test_dummy
+; CHECK-VFP-NEXT: tst r0, #1
+; CHECK-VFP-NEXT: bne .LBB13_1
+; CHECK-VFP-NEXT: strh r5, [r4]
+; CHECK-VFP-NEXT: pop {r4, r5, r6, pc}
+;
+; CHECK-NOVFP-LABEL: test_phi:
+; CHECK-NOVFP: .save {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: ldrh r5, [r0]
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: .LBB13_1:
+; CHECK-NOVFP-NEXT: mov r0, r4
+; CHECK-NOVFP-NEXT: mov r6, r5
+; CHECK-NOVFP-NEXT: ldrh r5, [r4]
+; CHECK-NOVFP-NEXT: bl test_dummy
+; CHECK-NOVFP-NEXT: tst r0, #1
+; CHECK-NOVFP-NEXT: bne .LBB13_1
+; CHECK-NOVFP-NEXT: strh r6, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r6, pc}
entry:
%a = load half, ptr %p
br label %loop
@@ -244,84 +591,224 @@ return:
}
define i32 @test_fptosi_i32(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_fptosi_i32:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcvt.s32.f32
-; CHECK-NOVFP: bl __aeabi_f2iz
+; CHECK-FP16-LABEL: test_fptosi_i32:
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fptosi_i32:
+; CHECK-LIBCALL-VFP: .save {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vcvt.s32.f32 s0, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: pop {r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_fptosi_i32:
+; CHECK-NOVFP: .save {r11, lr}
+; CHECK-NOVFP-NEXT: push {r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2iz
+; CHECK-NOVFP-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fptosi half %a to i32
ret i32 %r
}
define i64 @test_fptosi_i64(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_fptosi_i64:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-ALL: bl __aeabi_f2lz
+; CHECK-FP16-LABEL: test_fptosi_i64:
+; CHECK-FP16: .save {r11, lr}
+; CHECK-FP16-NEXT: push {r11, lr}
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: bl __aeabi_f2lz
+; CHECK-FP16-NEXT: pop {r11, pc}
+;
+; CHECK-LIBCALL-LABEL: test_fptosi_i64:
+; CHECK-LIBCALL: .save {r11, lr}
+; CHECK-LIBCALL-NEXT: push {r11, lr}
+; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2lz
+; CHECK-LIBCALL-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fptosi half %a to i64
ret i64 %r
}
define i32 @test_fptoui_i32(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_fptoui_i32:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcvt.u32.f32
-; CHECK-NOVFP: bl __aeabi_f2uiz
+; CHECK-FP16-LABEL: test_fptoui_i32:
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fptoui_i32:
+; CHECK-LIBCALL-VFP: .save {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vcvt.u32.f32 s0, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: pop {r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_fptoui_i32:
+; CHECK-NOVFP: .save {r11, lr}
+; CHECK-NOVFP-NEXT: push {r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2uiz
+; CHECK-NOVFP-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fptoui half %a to i32
ret i32 %r
}
define i64 @test_fptoui_i64(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_fptoui_i64:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-ALL: bl __aeabi_f2ulz
+; CHECK-FP16-LABEL: test_fptoui_i64:
+; CHECK-FP16: .save {r11, lr}
+; CHECK-FP16-NEXT: push {r11, lr}
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: bl __aeabi_f2ulz
+; CHECK-FP16-NEXT: pop {r11, pc}
+;
+; CHECK-LIBCALL-LABEL: test_fptoui_i64:
+; CHECK-LIBCALL: .save {r11, lr}
+; CHECK-LIBCALL-NEXT: push {r11, lr}
+; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2ulz
+; CHECK-LIBCALL-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fptoui half %a to i64
ret i64 %r
}
define void @test_sitofp_i32(i32 %a, ptr %p) #0 {
-; CHECK-ALL-LABEL: test_sitofp_i32:
-; CHECK-VFP: vcvt.f32.s32
-; CHECK-NOVFP: bl __aeabi_i2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_sitofp_i32:
+; CHECK-FP16: vmov s0, r0
+; CHECK-FP16-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r1]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_sitofp_i32:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r1
+; CHECK-LIBCALL-VFP-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_sitofp_i32:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_i2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%r = sitofp i32 %a to half
store half %r, ptr %p
ret void
}
define void @test_uitofp_i32(i32 %a, ptr %p) #0 {
-; CHECK-ALL-LABEL: test_uitofp_i32:
-; CHECK-VFP: vcvt.f32.u32
-; CHECK-NOVFP: bl __aeabi_ui2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_uitofp_i32:
+; CHECK-FP16: vmov s0, r0
+; CHECK-FP16-NEXT: vcvt.f32.u32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r1]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_uitofp_i32:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r1
+; CHECK-LIBCALL-VFP-NEXT: vcvt.f32.u32 s0, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_uitofp_i32:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_ui2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%r = uitofp i32 %a to half
store half %r, ptr %p
ret void
}
define void @test_sitofp_i64(i64 %a, ptr %p) #0 {
-; CHECK-ALL-LABEL: test_sitofp_i64:
-; CHECK-ALL: bl __aeabi_l2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_sitofp_i64:
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r2
+; CHECK-FP16-NEXT: bl __aeabi_l2f
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-LABEL: test_sitofp_i64:
+; CHECK-LIBCALL: .save {r4, lr}
+; CHECK-LIBCALL-NEXT: push {r4, lr}
+; CHECK-LIBCALL-NEXT: mov r4, r2
+; CHECK-LIBCALL-NEXT: bl __aeabi_l2f
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-NEXT: pop {r4, pc}
%r = sitofp i64 %a to half
store half %r, ptr %p
ret void
}
define void @test_uitofp_i64(i64 %a, ptr %p) #0 {
-; CHECK-ALL-LABEL: test_uitofp_i64:
-; CHECK-ALL: bl __aeabi_ul2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_uitofp_i64:
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r2
+; CHECK-FP16-NEXT: bl __aeabi_ul2f
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-LABEL: test_uitofp_i64:
+; CHECK-LIBCALL: .save {r4, lr}
+; CHECK-LIBCALL-NEXT: push {r4, lr}
+; CHECK-LIBCALL-NEXT: mov r4, r2
+; CHECK-LIBCALL-NEXT: bl __aeabi_ul2f
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-NEXT: pop {r4, pc}
%r = uitofp i64 %a to half
store half %r, ptr %p
ret void
@@ -329,19 +816,49 @@ define void @test_uitofp_i64(i64 %a, ptr %p) #0 {
define void @test_fptrunc_float(float %f, ptr %p) #0 {
; CHECK-FP16-LABEL: test_fptrunc_float:
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_fptrunc_float:
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fptrunc_float:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fptrunc_float:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = fptrunc float %f to half
store half %a, ptr %p
ret void
}
define void @test_fptrunc_double(double %d, ptr %p) #0 {
-; CHECK-FP16-LABEL: test_fptrunc_double:
-; CHECK-FP16: bl __aeabi_d2h
-; CHECK-LIBCALL-LABEL: test_fptrunc_double:
-; CHECK-LIBCALL: bl __aeabi_d2h
+; CHECK-VFP-LABEL: test_fptrunc_double:
+; CHECK-VFP: .save {r4, lr}
+; CHECK-VFP-NEXT: push {r4, lr}
+; CHECK-VFP-NEXT: mov r4, r0
+; CHECK-VFP-NEXT: vmov r0, r1, d0
+; CHECK-VFP-NEXT: bl __aeabi_d2h
+; CHECK-VFP-NEXT: strh r0, [r4]
+; CHECK-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_fptrunc_double:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r2
+; CHECK-NOVFP-NEXT: bl __aeabi_d2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = fptrunc double %d to half
store half %a, ptr %p
ret void
@@ -349,9 +866,25 @@ define void @test_fptrunc_double(double %d, ptr %p) #0 {
define float @test_fpextend_float(ptr %p) {
; CHECK-FP16-LABEL: test_fpextend_float:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL-LABEL: test_fpextend_float:
-; CHECK-LIBCALL: bl __aeabi_h2f
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fpextend_float:
+; CHECK-LIBCALL-VFP: .save {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: pop {r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_fpextend_float:
+; CHECK-NOVFP: .save {r11, lr}
+; CHECK-NOVFP-NEXT: push {r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fpext half %a to float
ret float %r
@@ -359,11 +892,28 @@ define float @test_fpextend_float(ptr %p) {
define double @test_fpextend_double(ptr %p) {
; CHECK-FP16-LABEL: test_fpextend_double:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL-LABEL: test_fpextend_double:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcvt.f64.f32
-; CHECK-NOVFP: bl __aeabi_f2d
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvt.f64.f32 d0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fpextend_double:
+; CHECK-LIBCALL-VFP: .save {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vcvt.f64.f32 d0, s0
+; CHECK-LIBCALL-VFP-NEXT: pop {r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_fpextend_double:
+; CHECK-NOVFP: .save {r11, lr}
+; CHECK-NOVFP-NEXT: push {r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2d
+; CHECK-NOVFP-NEXT: pop {r11, pc}
%a = load half, ptr %p, align 2
%r = fpext half %a to double
ret double %r
@@ -371,9 +921,8 @@ define double @test_fpextend_double(ptr %p) {
define i16 @test_bitcast_halftoi16(ptr %p) #0 {
; CHECK-ALL-LABEL: test_bitcast_halftoi16:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-ALL-NEXT: ldrh r0, [r0]
-; CHECK-ALL-NEXT: bx lr
+; CHECK-ALL: ldrh r0, [r0]
+; CHECK-ALL-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = bitcast half %a to i16
ret i16 %r
@@ -381,9 +930,8 @@ define i16 @test_bitcast_halftoi16(ptr %p) #0 {
define void @test_bitcast_i16tohalf(i16 %a, ptr %p) #0 {
; CHECK-ALL-LABEL: test_bitcast_i16tohalf:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-ALL-NEXT: strh r0, [r1]
-; CHECK-ALL-NEXT: bx lr
+; CHECK-ALL: strh r0, [r1]
+; CHECK-ALL-NEXT: bx lr
%r = bitcast i16 %a to half
store half %r, ptr %p
ret void
@@ -415,14 +963,39 @@ declare half @llvm.roundeven.f16(half %a) #0
declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
define void @test_sqrt(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_sqrt:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vsqrt.f32
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL-VFP: vsqrt.f32
-; CHECK-NOVFP: bl sqrtf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16-LABEL: test_sqrt:
+; CHECK-FP16: ldrh r1, [r0]
+; CHECK-FP16-NEXT: vmov s0, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vsqrt.f32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_sqrt:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vsqrt.f32 s0, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_sqrt:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl sqrtf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.sqrt.f16(half %a)
store half %r, ptr %p
@@ -431,13 +1004,46 @@ define void @test_sqrt(ptr %p) #0 {
define void @test_fpowi(ptr %p, i32 %b) #0 {
; CHECK-FP16-LABEL: test_fpowi:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl __powisf2
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_fpowi:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __powisf2
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: mov r0, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl __powisf2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fpowi:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r4
+; CHECK-LIBCALL-VFP-NEXT: bl __powisf2
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r5]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_fpowi:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r4
+; CHECK-NOVFP-NEXT: bl __powisf2
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r5]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.powi.f16.i32(half %a, i32 %b)
store half %r, ptr %p
@@ -446,13 +1052,41 @@ define void @test_fpowi(ptr %p, i32 %b) #0 {
define void @test_sin(ptr %p) #0 {
; CHECK-FP16-LABEL: test_sin:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl sinf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_sin:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl sinf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl sinf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_sin:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl sinf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_sin:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl sinf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.sin.f16(half %a)
store half %r, ptr %p
@@ -461,13 +1095,41 @@ define void @test_sin(ptr %p) #0 {
define void @test_cos(ptr %p) #0 {
; CHECK-FP16-LABEL: test_cos:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl cosf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_cos:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl cosf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl cosf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_cos:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl cosf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_cos:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl cosf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.cos.f16(half %a)
store half %r, ptr %p
@@ -476,13 +1138,41 @@ define void @test_cos(ptr %p) #0 {
define void @test_tan(ptr %p) #0 {
; CHECK-FP16-LABEL: test_tan:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl tanf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_tan:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl tanf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl tanf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_tan:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl tanf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_tan:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl tanf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.tan.f16(half %a)
store half %r, ptr %p
@@ -491,15 +1181,52 @@ define void @test_tan(ptr %p) #0 {
define void @test_pow(ptr %p, ptr %q) #0 {
; CHECK-FP16-LABEL: test_pow:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl powf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_pow:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl powf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-FP16-NEXT: bl powf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_pow:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s1, r5
+; CHECK-LIBCALL-VFP-NEXT: bl powf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_pow:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl powf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.pow.f16(half %a, half %b)
@@ -509,13 +1236,51 @@ define void @test_pow(ptr %p, ptr %q) #0 {
define void @test_cbrt(ptr %p) #0 {
; CHECK-FP16-LABEL: test_cbrt:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl powf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_cbrt:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl powf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vldr s1, .LCPI34_0
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl powf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+; CHECK-FP16-NEXT: .p2align 2
+; CHECK-FP16-NEXT: .LCPI34_0:
+; CHECK-FP16-NEXT: .long 0x3eaaa000
+;
+; CHECK-LIBCALL-VFP-LABEL: test_cbrt:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vldr s1, .LCPI34_0
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl powf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+; CHECK-LIBCALL-VFP-NEXT: .p2align 2
+; CHECK-LIBCALL-VFP-NEXT: .LCPI34_0:
+; CHECK-LIBCALL-VFP-NEXT: .long 0x3eaaa000
+;
+; CHECK-NOVFP-LABEL: test_cbrt:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: movw r1, #40960
+; CHECK-NOVFP-NEXT: movt r1, #16042
+; CHECK-NOVFP-NEXT: bl powf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.pow.f16(half %a, half 0x3FD5540000000000)
store half %r, ptr %p
@@ -524,13 +1289,41 @@ define void @test_cbrt(ptr %p) #0 {
define void @test_exp(ptr %p) #0 {
; CHECK-FP16-LABEL: test_exp:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl expf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_exp:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl expf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl expf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_exp:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl expf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_exp:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl expf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.exp.f16(half %a)
store half %r, ptr %p
@@ -539,13 +1332,41 @@ define void @test_exp(ptr %p) #0 {
define void @test_exp2(ptr %p) #0 {
; CHECK-FP16-LABEL: test_exp2:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl exp2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_exp2:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl exp2f
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl exp2f
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_exp2:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl exp2f
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_exp2:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl exp2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.exp2.f16(half %a)
store half %r, ptr %p
@@ -554,13 +1375,41 @@ define void @test_exp2(ptr %p) #0 {
define void @test_log(ptr %p) #0 {
; CHECK-FP16-LABEL: test_log:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl logf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_log:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl logf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl logf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_log:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl logf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_log:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl logf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log.f16(half %a)
store half %r, ptr %p
@@ -569,13 +1418,41 @@ define void @test_log(ptr %p) #0 {
define void @test_log10(ptr %p) #0 {
; CHECK-FP16-LABEL: test_log10:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl log10f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_log10:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl log10f
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl log10f
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_log10:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl log10f
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_log10:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl log10f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log10.f16(half %a)
store half %r, ptr %p
@@ -584,13 +1461,41 @@ define void @test_log10(ptr %p) #0 {
define void @test_log2(ptr %p) #0 {
; CHECK-FP16-LABEL: test_log2:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl log2f
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_log2:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl log2f
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl log2f
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_log2:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl log2f
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_log2:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl log2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.log2.f16(half %a)
store half %r, ptr %p
@@ -599,17 +1504,65 @@ define void @test_log2(ptr %p) #0 {
define void @test_fma(ptr %p, ptr %q, ptr %r) #0 {
; CHECK-FP16-LABEL: test_fma:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl fmaf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_fma:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl fmaf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r1]
+; CHECK-FP16-NEXT: ldrh r1, [r4]
+; CHECK-FP16-NEXT: ldrh r2, [r2]
+; CHECK-FP16-NEXT: vmov s2, r0
+; CHECK-FP16-NEXT: vmov s0, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-FP16-NEXT: vmov s2, r2
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: bl fmaf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fma:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r2]
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r6, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r5]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s1, r5
+; CHECK-LIBCALL-VFP-NEXT: vmov s2, r6
+; CHECK-LIBCALL-VFP-NEXT: bl fmaf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r6, pc}
+;
+; CHECK-NOVFP-LABEL: test_fma:
+; CHECK-NOVFP: .save {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: mov r5, r2
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r6, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r5]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r6
+; CHECK-NOVFP-NEXT: mov r2, r5
+; CHECK-NOVFP-NEXT: bl fmaf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r6, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%c = load half, ptr %r, align 2
@@ -620,13 +1573,25 @@ define void @test_fma(ptr %p, ptr %q, ptr %r) #0 {
define void @test_fabs(ptr %p) {
; CHECK-FP16-LABEL: test_fabs:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vabs.f32
-; CHECK-FP16: vcvtb.f16.f32
+; CHECK-FP16: ldrh r1, [r0]
+; CHECK-FP16-NEXT: vmov s0, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vabs.f32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
; CHECK-LIBCALL-LABEL: test_fabs:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bic
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-LIBCALL: .save {r4, lr}
+; CHECK-LIBCALL-NEXT: push {r4, lr}
+; CHECK-LIBCALL-NEXT: mov r4, r0
+; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-NEXT: bic r0, r0, #-2147483648
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.fabs.f16(half %a)
store half %r, ptr %p
@@ -635,15 +1600,52 @@ define void @test_fabs(ptr %p) {
define void @test_minnum(ptr %p, ptr %q) #0 {
; CHECK-FP16-LABEL: test_minnum:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl fminf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_minnum:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl fminf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-FP16-NEXT: bl fminf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_minnum:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s1, r5
+; CHECK-LIBCALL-VFP-NEXT: bl fminf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_minnum:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl fminf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.minnum.f16(half %a, half %b)
@@ -653,15 +1655,52 @@ define void @test_minnum(ptr %p, ptr %q) #0 {
define void @test_maxnum(ptr %p, ptr %q) #0 {
; CHECK-FP16-LABEL: test_maxnum:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl fmaxf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_maxnum:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl fmaxf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s1, s2
+; CHECK-FP16-NEXT: bl fmaxf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_maxnum:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s1, r5
+; CHECK-LIBCALL-VFP-NEXT: bl fmaxf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_maxnum:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl fmaxf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%r = call half @llvm.maxnum.f16(half %a, half %b)
@@ -670,16 +1709,45 @@ define void @test_maxnum(ptr %p, ptr %q) #0 {
}
define void @test_minimum(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_minimum:
-; CHECK-FP16: vmov.f32 s0, #1.000000e+00
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL-VFP: vmov.f32 s{{[0-9]+}}, #1.000000e+00
-; CHECK-NOVFP: mov r{{[0-9]+}}, #1065353216
-; CHECK-VFP: vcmp.f32
-; CHECK-VFP: vmrs
-; CHECK-VFP: movge
-; CHECK-NOVFP: bl __aeabi_fcmpge
+; CHECK-FP16-LABEL: test_minimum:
+; CHECK-FP16: vmov.f32 s0, #1.000000e+00
+; CHECK-FP16-NEXT: ldrh r1, [r0]
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcmp.f32 s2, s0
+; CHECK-FP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP16-NEXT: movge r1, #15360
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_minimum:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r5, [r0]
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r5
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov.f32 s0, #1.000000e+00
+; CHECK-LIBCALL-VFP-NEXT: vmov s2, r0
+; CHECK-LIBCALL-VFP-NEXT: vcmp.f32 s2, s0
+; CHECK-LIBCALL-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-LIBCALL-VFP-NEXT: movge r5, #15360
+; CHECK-LIBCALL-VFP-NEXT: strh r5, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_minimum:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r5, [r0]
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: mov r0, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, #1065353216
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmpge
+; CHECK-NOVFP-NEXT: cmp r0, #0
+; CHECK-NOVFP-NEXT: movne r5, #15360
+; CHECK-NOVFP-NEXT: strh r5, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%c = fcmp ult half %a, 1.0
%r = select i1 %c, half %a, half 1.0
@@ -688,16 +1756,45 @@ define void @test_minimum(ptr %p) #0 {
}
define void @test_maximum(ptr %p) #0 {
-; CHECK-ALL-LABEL: test_maximum:
-; CHECK-FP16: vmov.f32 s0, #1.000000e+00
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL-VFP: vmov.f32 s0, #1.000000e+00
-; CHECK-NOVFP: mov r{{[0-9]+}}, #1065353216
-; CHECK-VFP: vcmp.f32
-; CHECK-VFP: vmrs
-; CHECK-VFP: movls
-; CHECK-NOVFP: bl __aeabi_fcmple
+; CHECK-FP16-LABEL: test_maximum:
+; CHECK-FP16: vmov.f32 s0, #1.000000e+00
+; CHECK-FP16-NEXT: ldrh r1, [r0]
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcmp.f32 s2, s0
+; CHECK-FP16-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP16-NEXT: movls r1, #15360
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_maximum:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT: ldrh r5, [r0]
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r5
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov.f32 s0, #1.000000e+00
+; CHECK-LIBCALL-VFP-NEXT: vmov s2, r0
+; CHECK-LIBCALL-VFP-NEXT: vcmp.f32 s2, s0
+; CHECK-LIBCALL-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-LIBCALL-VFP-NEXT: movls r5, #15360
+; CHECK-LIBCALL-VFP-NEXT: strh r5, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_maximum:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: ldrh r5, [r0]
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: mov r0, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, #1065353216
+; CHECK-NOVFP-NEXT: bl __aeabi_fcmple
+; CHECK-NOVFP-NEXT: cmp r0, #0
+; CHECK-NOVFP-NEXT: movne r5, #15360
+; CHECK-NOVFP-NEXT: strh r5, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
%a = load half, ptr %p, align 2
%c = fcmp ugt half %a, 1.0
%r = select i1 %c, half %a, half 1.0
@@ -723,13 +1820,41 @@ define void @test_copysign(ptr %p, ptr %q) #0 {
define void @test_floor(ptr %p) {
; CHECK-FP16-LABEL: test_floor:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl floorf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_floor:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl floorf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl floorf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_floor:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl floorf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_floor:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl floorf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.floor.f16(half %a)
store half %r, ptr %p
@@ -738,13 +1863,41 @@ define void @test_floor(ptr %p) {
define void @test_ceil(ptr %p) {
; CHECK-FP16-LABEL: test_ceil:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl ceilf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_ceil:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl ceilf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl ceilf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_ceil:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl ceilf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_ceil:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl ceilf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.ceil.f16(half %a)
store half %r, ptr %p
@@ -753,13 +1906,41 @@ define void @test_ceil(ptr %p) {
define void @test_trunc(ptr %p) {
; CHECK-FP16-LABEL: test_trunc:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl truncf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_trunc:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl truncf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl truncf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_trunc:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl truncf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_trunc:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl truncf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.trunc.f16(half %a)
store half %r, ptr %p
@@ -768,13 +1949,41 @@ define void @test_trunc(ptr %p) {
define void @test_rint(ptr %p) {
; CHECK-FP16-LABEL: test_rint:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl rintf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_rint:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl rintf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl rintf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_rint:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl rintf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_rint:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl rintf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.rint.f16(half %a)
store half %r, ptr %p
@@ -783,13 +1992,41 @@ define void @test_rint(ptr %p) {
define void @test_nearbyint(ptr %p) {
; CHECK-FP16-LABEL: test_nearbyint:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl nearbyintf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_nearbyint:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl nearbyintf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl nearbyintf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_nearbyint:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl nearbyintf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_nearbyint:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl nearbyintf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.nearbyint.f16(half %a)
store half %r, ptr %p
@@ -798,13 +2035,41 @@ define void @test_nearbyint(ptr %p) {
define void @test_round(ptr %p) {
; CHECK-FP16-LABEL: test_round:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl roundf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_round:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl roundf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl roundf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_round:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl roundf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_round:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl roundf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.round.f16(half %a)
store half %r, ptr %p
@@ -813,13 +2078,41 @@ define void @test_round(ptr %p) {
define void @test_roundeven(ptr %p) {
; CHECK-FP16-LABEL: test_roundeven:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: bl roundevenf
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_roundeven:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl roundevenf
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: .save {r4, lr}
+; CHECK-FP16-NEXT: push {r4, lr}
+; CHECK-FP16-NEXT: mov r4, r0
+; CHECK-FP16-NEXT: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl roundevenf
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r4]
+; CHECK-FP16-NEXT: pop {r4, pc}
+;
+; CHECK-LIBCALL-VFP-LABEL: test_roundeven:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: bl roundevenf
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_roundeven:
+; CHECK-NOVFP: .save {r4, lr}
+; CHECK-NOVFP-NEXT: push {r4, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: bl roundevenf
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, pc}
%a = load half, ptr %p, align 2
%r = call half @llvm.roundeven.f16(half %a)
store half %r, ptr %p
@@ -828,25 +2121,78 @@ define void @test_roundeven(ptr %p) {
define void @test_fmuladd(ptr %p, ptr %q, ptr %r) #0 {
; CHECK-FP16-LABEL: test_fmuladd:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vmul.f32
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vadd.f32
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-LIBCALL-LABEL: test_fmuladd:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL-VFP: vmul.f32
-; CHECK-NOVFP: bl __aeabi_fmul
-; CHECK-LIBCALL: bl __aeabi_f2h
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL-VFP: vadd.f32
-; CHECK-NOVFP: bl __aeabi_fadd
-; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-FP16: ldrh r3, [r0]
+; CHECK-FP16-NEXT: ldrh r1, [r1]
+; CHECK-FP16-NEXT: ldrh r2, [r2]
+; CHECK-FP16-NEXT: vmov s0, r3
+; CHECK-FP16-NEXT: vmov s2, r1
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vmul.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vmov s2, r2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r1, s0
+; CHECK-FP16-NEXT: strh r1, [r0]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_fmuladd:
+; CHECK-LIBCALL-VFP: .save {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r1]
+; CHECK-LIBCALL-VFP-NEXT: mov r5, r2
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmul.f32 s0, s0, s16
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r6, r0
+; CHECK-LIBCALL-VFP-NEXT: ldrh r0, [r5]
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov s2, r6
+; CHECK-LIBCALL-VFP-NEXT: vadd.f32 s0, s2, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, r5, r6, pc}
+;
+; CHECK-NOVFP-LABEL: test_fmuladd:
+; CHECK-NOVFP: .save {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r6, lr}
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r1]
+; CHECK-NOVFP-NEXT: mov r5, r2
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r6, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r4]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r6
+; CHECK-NOVFP-NEXT: bl __aeabi_fmul
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: mov r6, r0
+; CHECK-NOVFP-NEXT: ldrh r0, [r5]
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: uxth r0, r6
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_fadd
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: strh r0, [r4]
+; CHECK-NOVFP-NEXT: pop {r4, r5, r6, pc}
%a = load half, ptr %p, align 2
%b = load half, ptr %q, align 2
%c = load half, ptr %r, align 2
@@ -859,26 +2205,53 @@ define void @test_fmuladd(ptr %p, ptr %q, ptr %r) #0 {
; to the register, but are stored in the stack instead. Hence insertelement
; and extractelement have these extra loads and stores.
define void @test_insertelement(ptr %p, ptr %q, i32 %i) #0 {
-; CHECK-ALL-LABEL: test_insertelement:
-; CHECK-ALL: sub sp, sp, #8
-
-; CHECK-ALL-DAG: and
-; CHECK-ALL-DAG: mov
-; CHECK-ALL-DAG: ldrd
-; CHECK-ALL-DAG: orr
-; CHECK-ALL-DAG: ldrh
-; CHECK-ALL-DAG: stm
-; CHECK-ALL: ldrh
-; CHECK-ALL-DAG: ldrh
-; CHECK-ALL-DAG: ldrh
-; CHECK-ALL-DAG: ldrh
-; CHECK-ALL-DAG: strh
-; CHECK-ALL-DAG: strh
-; CHECK-ALL-DAG: strh
-; CHECK-ALL-DAG: strh
-; CHECK-ALL: strh
-
-; CHECK-ALL: add sp, sp, #8
+; CHECK-VFP-LABEL: test_insertelement:
+; CHECK-VFP: .save {r4, r5, r11, lr}
+; CHECK-VFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-VFP-NEXT: .pad #8
+; CHECK-VFP-NEXT: sub sp, sp, #8
+; CHECK-VFP-NEXT: and r2, r2, #3
+; CHECK-VFP-NEXT: mov r3, sp
+; CHECK-VFP-NEXT: ldrd r4, r5, [r1]
+; CHECK-VFP-NEXT: orr r2, r3, r2, lsl #1
+; CHECK-VFP-NEXT: ldrh r0, [r0]
+; CHECK-VFP-NEXT: stm sp, {r4, r5}
+; CHECK-VFP-NEXT: strh r0, [r2]
+; CHECK-VFP-NEXT: ldrh r0, [sp, #6]
+; CHECK-VFP-NEXT: ldrh r2, [sp, #4]
+; CHECK-VFP-NEXT: ldrh r3, [sp, #2]
+; CHECK-VFP-NEXT: ldrh r5, [sp]
+; CHECK-VFP-NEXT: strh r0, [r1, #6]
+; CHECK-VFP-NEXT: strh r2, [r1, #4]
+; CHECK-VFP-NEXT: strh r3, [r1, #2]
+; CHECK-VFP-NEXT: strh r5, [r1]
+; CHECK-VFP-NEXT: add sp, sp, #8
+; CHECK-VFP-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-NOVFP-LABEL: test_insertelement:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: .pad #8
+; CHECK-NOVFP-NEXT: sub sp, sp, #8
+; CHECK-NOVFP-NEXT: ldrd r4, r5, [r1]
+; CHECK-NOVFP-NEXT: and r2, r2, #3
+; CHECK-NOVFP-NEXT: mov r3, sp
+; CHECK-NOVFP-NEXT: stm sp, {r4, r5}
+; CHECK-NOVFP-NEXT: orr r2, r3, r2, lsl #1
+; CHECK-NOVFP-NEXT: ldrh r0, [r0]
+; CHECK-NOVFP-NEXT: strh r0, [r2]
+; CHECK-NOVFP-NEXT: ldrh r0, [sp, #6]
+; CHECK-NOVFP-NEXT: strh r0, [r1, #6]
+; CHECK-NOVFP-NEXT: ldrh r0, [sp, #4]
+; CHECK-NOVFP-NEXT: strh r0, [r1, #4]
+; CHECK-NOVFP-NEXT: ldrh r0, [sp, #2]
+; CHECK-NOVFP-NEXT: strh r0, [r1, #2]
+; CHECK-NOVFP-NEXT: ldrh r0, [sp]
+; CHECK-NOVFP-NEXT: strh r0, [r1]
+; CHECK-NOVFP-NEXT: add sp, sp, #8
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
+
+
%a = load half, ptr %p, align 2
%b = load <4 x half>, ptr %q, align 8
%c = insertelement <4 x half> %b, half %a, i32 %i
@@ -888,15 +2261,19 @@ define void @test_insertelement(ptr %p, ptr %q, i32 %i) #0 {
define void @test_extractelement(ptr %p, ptr %q, i32 %i) #0 {
; CHECK-ALL-LABEL: test_extractelement:
-; CHECK-ALL: push {{{.*}}, lr}
-; CHECK-ALL: sub sp, sp, #8
-; CHECK-ALL: ldrd
-; CHECK-ALL: mov
-; CHECK-ALL: orr
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: add sp, sp, #8
-; CHECK-ALL: pop {{{.*}}, pc}
+; CHECK-ALL: .save {r4, r5, r11, lr}
+; CHECK-ALL-NEXT: push {r4, r5, r11, lr}
+; CHECK-ALL-NEXT: .pad #8
+; CHECK-ALL-NEXT: sub sp, sp, #8
+; CHECK-ALL-NEXT: ldrd r4, r5, [r1]
+; CHECK-ALL-NEXT: and r1, r2, #3
+; CHECK-ALL-NEXT: mov r2, sp
+; CHECK-ALL-NEXT: orr r1, r2, r1, lsl #1
+; CHECK-ALL-NEXT: stm sp, {r4, r5}
+; CHECK-ALL-NEXT: ldrh r1, [r1]
+; CHECK-ALL-NEXT: strh r1, [r0]
+; CHECK-ALL-NEXT: add sp, sp, #8
+; CHECK-ALL-NEXT: pop {r4, r5, r11, pc}
%a = load <4 x half>, ptr %q, align 8
%b = extractelement <4 x half> %a, i32 %i
store half %b, ptr %p
@@ -909,10 +2286,9 @@ define void @test_extractelement(ptr %p, ptr %q, i32 %i) #0 {
define void @test_insertvalue(ptr %p, ptr %q) {
; CHECK-ALL-LABEL: test_insertvalue:
-; CHECK-ALL-DAG: ldr
-; CHECK-ALL-DAG: ldrh
-; CHECK-ALL-DAG: strh
-; CHECK-ALL-DAG: str
+; CHECK-ALL: ldrh r1, [r1]
+; CHECK-ALL-NEXT: strh r1, [r0, #4]
+; CHECK-ALL-NEXT: bx lr
%a = load %struct.dummy, ptr %p
%b = load half, ptr %q
%c = insertvalue %struct.dummy %a, half %b, 1
@@ -922,9 +2298,9 @@ define void @test_insertvalue(ptr %p, ptr %q) {
define void @test_extractvalue(ptr %p, ptr %q) {
; CHECK-ALL-LABEL: test_extractvalue:
-; CHECK-ALL: .fnstart
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
+; CHECK-ALL: ldrh r0, [r0, #4]
+; CHECK-ALL-NEXT: strh r0, [r1]
+; CHECK-ALL-NEXT: bx lr
%a = load %struct.dummy, ptr %p
%b = extractvalue %struct.dummy %a, 1
store half %b, ptr %q
@@ -932,61 +2308,176 @@ define void @test_extractvalue(ptr %p, ptr %q) {
}
define %struct.dummy @test_struct_return(ptr %p) {
-; CHECK-ALL-LABEL: test_struct_return:
+; CHECK-VFP-LABEL: test_struct_return:
+; CHECK-VFP: ldrh r1, [r0, #4]
+; CHECK-VFP-NEXT: ldr r0, [r0]
+; CHECK-VFP-NEXT: vmov s0, r1
+; CHECK-VFP-NEXT: bx lr
+;
+; CHECK-NOVFP-LABEL: test_struct_return:
+; CHECK-NOVFP: ldr r2, [r0]
+; CHECK-NOVFP-NEXT: ldrh r1, [r0, #4]
+; CHECK-NOVFP-NEXT: mov r0, r2
+; CHECK-NOVFP-NEXT: bx lr
; CHECK-VFP-LIBCALL: bl __aeabi_h2f
-; CHECK-NOVFP-DAG: ldr
-; CHECK-NOVFP-DAG: ldrh
%a = load %struct.dummy, ptr %p
ret %struct.dummy %a
}
define half @test_struct_arg(%struct.dummy %p) {
-; CHECK-ALL-LABEL: test_struct_arg:
-; CHECK-ALL-NEXT: .fnstart
-; CHECK-NOVFP-NEXT: mov r0, r1
-; CHECK-ALL-NEXT: bx lr
+; CHECK-VFP-LABEL: test_struct_arg:
+; CHECK-VFP: bx lr
+;
+; CHECK-NOVFP-LABEL: test_struct_arg:
+; CHECK-NOVFP: mov r0, r1
+; CHECK-NOVFP-NEXT: bx lr
%a = extractvalue %struct.dummy %p, 1
ret half %a
}
define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 {
-; CHECK-LABEL: test_uitofp_i32_fadd:
-; CHECK-VFP-DAG: vcvt.f32.u32
-; CHECK-NOVFP-DAG: bl __aeabi_ui2f
+; CHECK-FP16-LABEL: test_uitofp_i32_fadd:
+; CHECK-FP16: vmov s2, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvt.f32.u32 s2, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s2, s2
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_uitofp_i32_fadd:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: vmov r1, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vcvt.f32.u32 s16, s0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s16
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r4
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vadd.f32 s0, s16, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_uitofp_i32_fadd:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_ui2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: uxth r0, r4
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: uxth r0, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r0
+; CHECK-NOVFP-NEXT: mov r0, r4
+; CHECK-NOVFP-NEXT: bl __aeabi_fadd
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
-; CHECK-FP16-DAG: vcvtb.f16.f32
-; CHECK-FP16-DAG: vcvtb.f32.f16
-; CHECK-LIBCALL-DAG: bl __aeabi_h2f
-; CHECK-LIBCALL-DAG: bl __aeabi_h2f
-; CHECK-VFP-DAG: vadd.f32
-; CHECK-NOVFP-DAG: bl __aeabi_fadd
-; CHECK-FP16-DAG: vcvtb.f16.f32
-; CHECK-LIBCALL-DAG: bl __aeabi_f2h
%c = uitofp i32 %a to half
%r = fadd half %b, %c
ret half %r
}
define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
-; CHECK-LABEL: test_sitofp_i32_fadd:
-; CHECK-VFP-DAG: vcvt.f32.s32
-; CHECK-NOVFP-DAG: bl __aeabi_i2f
+; CHECK-FP16-LABEL: test_sitofp_i32_fadd:
+; CHECK-FP16: vmov s2, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vcvt.f32.s32 s2, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s2, s2
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT: vadd.f32 s0, s0, s2
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-VFP-LABEL: test_sitofp_i32_fadd:
+; CHECK-LIBCALL-VFP: .save {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: push {r4, lr}
+; CHECK-LIBCALL-VFP-NEXT: .vsave {d8}
+; CHECK-LIBCALL-VFP-NEXT: vpush {d8}
+; CHECK-LIBCALL-VFP-NEXT: vmov r1, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vcvt.f32.s32 s16, s0
+; CHECK-LIBCALL-VFP-NEXT: mov r0, r1
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: mov r4, r0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s16
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: vmov s16, r4
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vadd.f32 s0, s16, s0
+; CHECK-LIBCALL-VFP-NEXT: vmov r0, s0
+; CHECK-LIBCALL-VFP-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP-NEXT: vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT: vpop {d8}
+; CHECK-LIBCALL-VFP-NEXT: pop {r4, pc}
+;
+; CHECK-NOVFP-LABEL: test_sitofp_i32_fadd:
+; CHECK-NOVFP: .save {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: push {r4, r5, r11, lr}
+; CHECK-NOVFP-NEXT: mov r4, r1
+; CHECK-NOVFP-NEXT: bl __aeabi_i2f
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: mov r5, r0
+; CHECK-NOVFP-NEXT: uxth r0, r4
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r4, r0
+; CHECK-NOVFP-NEXT: uxth r0, r5
+; CHECK-NOVFP-NEXT: bl __aeabi_h2f
+; CHECK-NOVFP-NEXT: mov r1, r0
+; CHECK-NOVFP-NEXT: mov r0, r4
+; CHECK-NOVFP-NEXT: bl __aeabi_fadd
+; CHECK-NOVFP-NEXT: bl __aeabi_f2h
+; CHECK-NOVFP-NEXT: pop {r4, r5, r11, pc}
-; CHECK-FP16-DAG: vcvtb.f16.f32
-; CHECK-FP16-DAG: vcvtb.f32.f16
-; CHECK-LIBCALL-DAG: bl __aeabi_h2f
-; CHECK-LIBCALL-DAG: bl __aeabi_h2f
-; CHECK-VFP-DAG: vadd.f32
-; CHECK-NOVFP-DAG: bl __aeabi_fadd
-; CHECK-FP16-DAG: vcvtb.f16.f32
-; CHECK-LIBCALL-DAG: bl __aeabi_f2h
%c = sitofp i32 %a to half
%r = fadd half %b, %c
ret half %r
}
+define void @test_fneg(ptr %p1, ptr %p2) #0 {
+; CHECK-FP16-LABEL: test_fneg:
+; CHECK-FP16: ldrh r0, [r0]
+; CHECK-FP16-NEXT: vmov s0, r0
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: vneg.f32 s0, s0
+; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: strh r0, [r1]
+; CHECK-FP16-NEXT: bx lr
+;
+; CHECK-LIBCALL-LABEL: test_fneg:
+; CHECK-LIBCALL: .save {r4, lr}
+; CHECK-LIBCALL-NEXT: push {r4, lr}
+; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
+; CHECK-LIBCALL-NEXT: mov r4, r1
+; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
+; CHECK-LIBCALL-NEXT: eor r0, r0, #-2147483648
+; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
+; CHECK-LIBCALL-NEXT: strh r0, [r4]
+; CHECK-LIBCALL-NEXT: pop {r4, pc}
+ %v = load half, ptr %p1, align 2
+ %res = fneg half %v
+ store half %res, ptr %p2, align 2
+ ret void
+}
+
attributes #0 = { nounwind }
>From 17e82f5dec79b51e896442f8f25ffa44159f1684 Mon Sep 17 00:00:00 2001
From: beetrees <b at beetr.ee>
Date: Mon, 1 Sep 2025 16:18:01 +0100
Subject: [PATCH 2/2] Fix legalizing `FNEG` and `FABS` with
`TypeSoftPromoteHalf`
---
.../SelectionDAG/LegalizeFloatTypes.cpp | 26 +-
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 +
llvm/test/CodeGen/AMDGPU/bf16.ll | 32 +-
llvm/test/CodeGen/AMDGPU/fabs.bf16.ll | 48 ++-
llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll | 151 +++------
llvm/test/CodeGen/AMDGPU/fneg.bf16.ll | 85 +++--
llvm/test/CodeGen/ARM/fp16-promote.ll | 50 +--
llvm/test/CodeGen/Generic/half-neg-abs.ll | 75 +++++
llvm/test/CodeGen/RISCV/half-arith.ll | 300 +++++++-----------
llvm/test/CodeGen/Thumb2/mve-vabd.ll | 44 +--
10 files changed, 346 insertions(+), 467 deletions(-)
create mode 100644 llvm/test/CodeGen/Generic/half-neg-abs.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 83bb1dfe86c6a..d8933826ebb05 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -3313,7 +3313,6 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::FP_ROUND: R = SoftPromoteHalfRes_FP_ROUND(N); break;
// Unary FP Operations
- case ISD::FABS:
case ISD::FACOS:
case ISD::FASIN:
case ISD::FATAN:
@@ -3329,7 +3328,6 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::FLOG2:
case ISD::FLOG10:
case ISD::FNEARBYINT:
- case ISD::FNEG:
case ISD::FREEZE:
case ISD::FRINT:
case ISD::FROUND:
@@ -3341,6 +3339,12 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
case ISD::FTAN:
case ISD::FTANH:
case ISD::FCANONICALIZE: R = SoftPromoteHalfRes_UnaryOp(N); break;
+ case ISD::FABS:
+ R = SoftPromoteHalfRes_FABS(N);
+ break;
+ case ISD::FNEG:
+ R = SoftPromoteHalfRes_FNEG(N);
+ break;
case ISD::AssertNoFPClass:
R = SoftPromoteHalfRes_AssertNoFPClass(N);
break;
@@ -3670,6 +3674,24 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_UnaryOp(SDNode *N) {
return DAG.getNode(GetPromotionOpcode(NVT, OVT), dl, MVT::i16, Res);
}
+SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FABS(SDNode *N) {
+ SDValue Op = GetSoftPromotedHalf(N->getOperand(0));
+ SDLoc dl(N);
+
+ // Clear the sign bit.
+ return DAG.getNode(ISD::AND, dl, MVT::i16, Op,
+ DAG.getConstant(0x7fff, dl, MVT::i16));
+}
+
+SDValue DAGTypeLegalizer::SoftPromoteHalfRes_FNEG(SDNode *N) {
+ SDValue Op = GetSoftPromotedHalf(N->getOperand(0));
+ SDLoc dl(N);
+
+ // Invert the sign bit.
+ return DAG.getNode(ISD::XOR, dl, MVT::i16, Op,
+ DAG.getConstant(0x8000, dl, MVT::i16));
+}
+
SDValue DAGTypeLegalizer::SoftPromoteHalfRes_AssertNoFPClass(SDNode *N) {
return GetSoftPromotedHalf(N->getOperand(0));
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 65fd863e55ac9..8f0915139f6dd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -830,6 +830,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue SoftPromoteHalfRes_SELECT(SDNode *N);
SDValue SoftPromoteHalfRes_SELECT_CC(SDNode *N);
SDValue SoftPromoteHalfRes_UnaryOp(SDNode *N);
+ SDValue SoftPromoteHalfRes_FABS(SDNode *N);
+ SDValue SoftPromoteHalfRes_FNEG(SDNode *N);
SDValue SoftPromoteHalfRes_AssertNoFPClass(SDNode *N);
SDValue SoftPromoteHalfRes_XINT_TO_FP(SDNode *N);
SDValue SoftPromoteHalfRes_UNDEF(SDNode *N);
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 10e523d1a0cf1..e4a5e0b6dfde1 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -19846,18 +19846,14 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_fabs_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fabs_bf16:
@@ -20040,10 +20036,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_fneg_fabs_bf16:
@@ -20051,10 +20044,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fneg_fabs_bf16:
@@ -20096,23 +20086,17 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
; GCN-LABEL: s_fneg_fabs_bf16:
; GCN: ; %bb.0:
; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GCN-NEXT: v_or_b32_e32 v0, 0x8000, v0
; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT: s_bitset0_b32 s0, 31
-; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT: s_xor_b32 s0, s0, 0x80000000
-; GCN-NEXT: s_lshr_b32 s0, s0, 16
; GCN-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: s_fneg_fabs_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0x8000, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT: s_bitset0_b32 s0, 31
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT: s_xor_b32 s0, s0, 0x80000000
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
; GFX7-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index d8f81db70e309..3b2340e06bf6b 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -218,19 +218,11 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s4, s3, 0xffff0000
-; CI-NEXT: s_lshl_b32 s3, s3, 16
-; CI-NEXT: s_and_b32 s5, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4|
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s3|
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s5|
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_alignbit_b32 v1, v0, v1, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2|
-; CI-NEXT: v_alignbit_b32 v0, v0, v2, 16
+; CI-NEXT: s_and_b32 s3, s3, 0x7fff7fff
+; CI-NEXT: s_and_b32 s2, s2, 0x7fff7fff
; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
@@ -537,16 +529,15 @@ define amdgpu_kernel void @v_fabs_fold_self_v2bf16(ptr addrspace(1) %out, ptr ad
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e64 v4, 1.0, |v3|
-; CI-NEXT: v_mul_f32_e64 v5, 1.0, |v2|
-; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; CI-NEXT: v_mul_f32_e32 v3, v4, v3
-; CI-NEXT: v_mul_f32_e32 v2, v5, v2
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_and_b32_e32 v3, 0x7fff, v2
+; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; CI-NEXT: v_and_b32_e32 v2, 0x7fff0000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_mul_f32_e32 v2, v2, v5
+; CI-NEXT: v_mul_f32_e32 v3, v3, v4
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -900,16 +891,13 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_dword v0, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |v1|
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1
+; CI-NEXT: v_and_b32_e32 v1, 0x7fff, v0
+; CI-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_add_f32_e32 v0, 2.0, v0
-; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; CI-NEXT: flat_store_short v[0:1], v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: flat_store_short v[0:1], v0
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index 64a9727330cfd..76da0aaf251b2 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -107,12 +107,10 @@ define amdgpu_kernel void @fneg_fabs_fmul_bf16(ptr addrspace(1) %out, bfloat %x,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s3, s3, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
+; CI-NEXT: s_lshl_b32 s3, s2, 16
; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e32 v0, s2, v0
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mul_f32_e64 v0, s2, -|v0|
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
@@ -204,12 +202,10 @@ define amdgpu_kernel void @fneg_fabs_free_bf16(ptr addrspace(1) %out, i16 %in) {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_bitset1_b32 s2, 15
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -279,12 +275,10 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_bitset1_b32 s2, 15
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -345,43 +339,22 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) {
}
define amdgpu_kernel void @v_fneg_fabs_bf16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
-; CI-LABEL: v_fneg_fabs_bf16:
-; CI: ; %bb.0:
-; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; CI-NEXT: s_add_i32 s12, s12, s17
-; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_load_ushort v2, v[0:1]
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |v2|
-; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-NEXT: flat_store_short v[0:1], v2
-; CI-NEXT: s_endpgm
-;
-; VI-LABEL: v_fneg_fabs_bf16:
-; VI: ; %bb.0:
-; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; VI-NEXT: s_add_i32 s12, s12, s17
-; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_load_ushort v2, v[0:1]
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_e32 v2, 0x8000, v2
-; VI-NEXT: flat_store_short v[0:1], v2
-; VI-NEXT: s_endpgm
+; CIVI-LABEL: v_fneg_fabs_bf16:
+; CIVI: ; %bb.0:
+; CIVI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; CIVI-NEXT: s_add_i32 s12, s12, s17
+; CIVI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CIVI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CIVI-NEXT: s_waitcnt lgkmcnt(0)
+; CIVI-NEXT: v_mov_b32_e32 v0, s2
+; CIVI-NEXT: v_mov_b32_e32 v1, s3
+; CIVI-NEXT: flat_load_ushort v2, v[0:1]
+; CIVI-NEXT: v_mov_b32_e32 v0, s0
+; CIVI-NEXT: v_mov_b32_e32 v1, s1
+; CIVI-NEXT: s_waitcnt vmcnt(0)
+; CIVI-NEXT: v_or_b32_e32 v2, 0x8000, v2
+; CIVI-NEXT: flat_store_short v[0:1], v2
+; CIVI-NEXT: s_endpgm
;
; GFX9-LABEL: v_fneg_fabs_bf16:
; GFX9: ; %bb.0:
@@ -431,21 +404,13 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_add_f32_e64 v0, s3, 2.0
-; CI-NEXT: v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT: v_readfirstlane_b32 s2, v0
+; CI-NEXT: s_lshl_b32 s3, s2, 16
; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: s_bitset0_b32 s2, 31
-; CI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v1
-; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: s_xor_b32 s2, s2, 0x80000000
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: s_lshr_b32 s2, s2, 16
-; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT: v_alignbit_b32 v2, s2, v0, 16
+; CI-NEXT: v_add_f32_e64 v1, s2, 2.0
+; CI-NEXT: v_add_f32_e64 v0, s3, 1.0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CI-NEXT: v_alignbit_b32 v0, v1, v0, 16
+; CI-NEXT: v_or_b32_e32 v2, 0x80008000, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_store_dword v[0:1], v2
@@ -566,15 +531,10 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_bc_src(ptr addrspace(1) %out, <2 x
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0x7fff
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff0000
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: s_lshl_b32 s2, s3, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_or_b32 s2, s2, 0x80008000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -629,27 +589,11 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s4, s2, 16
-; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2|
-; CI-NEXT: s_and_b32 s2, s3, 0xffff0000
-; CI-NEXT: s_lshl_b32 s5, s3, 16
-; CI-NEXT: v_mul_f32_e64 v3, 1.0, |s2|
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4|
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s5|
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; CI-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT: v_alignbit_b32 v1, v3, v1, 16
-; CI-NEXT: v_alignbit_b32 v0, v2, v0, 16
+; CI-NEXT: s_or_b32 s3, s3, 0x80008000
+; CI-NEXT: s_or_b32 s2, s2, 0x80008000
; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
@@ -860,21 +804,20 @@ define amdgpu_kernel void @s_fneg_multi_use_fabs_v2bf16(ptr addrspace(1) %out0,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: v_mov_b32_e32 v2, s2
-; CI-NEXT: s_and_b32 s1, s4, 0x7fff
-; CI-NEXT: s_and_b32 s2, s4, 0x7fff0000
-; CI-NEXT: v_mul_f32_e64 v4, -1.0, s2
-; CI-NEXT: s_lshl_b32 s1, s1, 16
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: s_and_b32 s0, s4, 0x7fff7fff
-; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; CI-NEXT: v_mul_f32_e64 v5, -1.0, s1
-; CI-NEXT: v_alignbit_b32 v4, v4, v5, 16
-; CI-NEXT: v_mov_b32_e32 v5, s0
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: s_or_b32 s2, s0, 0x8000
+; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: s_and_b32 s1, s4, 0x7fff0000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s1, s1, s2
+; CI-NEXT: s_bitset1_b32 s1, 31
+; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_mov_b32_e32 v3, s3
-; CI-NEXT: flat_store_dword v[0:1], v5
-; CI-NEXT: flat_store_dword v[2:3], v4
+; CI-NEXT: flat_store_dword v[0:1], v4
+; CI-NEXT: v_mov_b32_e32 v0, s1
+; CI-NEXT: flat_store_dword v[2:3], v0
; CI-NEXT: s_endpgm
;
; VI-LABEL: s_fneg_multi_use_fabs_v2bf16:
@@ -1086,5 +1029,3 @@ declare <4 x bfloat> @llvm.fabs.v4bf16(<4 x bfloat>) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CIVI: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
index d232693b46ad9..98044a72870fb 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
@@ -14,11 +14,10 @@ define amdgpu_kernel void @s_fneg_bf16(ptr addrspace(1) %out, bfloat %in) #0 {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -93,9 +92,7 @@ define amdgpu_kernel void @v_fneg_bf16(ptr addrspace(1) %out, ptr addrspace(1) %
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_ushort v2, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_xor_b32_e32 v2, 0x8000, v2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -170,11 +167,10 @@ define amdgpu_kernel void @s_fneg_free_bf16(ptr addrspace(1) %out, i16 %in) #0 {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -248,9 +244,9 @@ define amdgpu_kernel void @v_fneg_fold_bf16(ptr addrspace(1) %out, ptr addrspace
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v3, -1.0, v2
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_mul_f32_e32 v2, v3, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: flat_store_short v[0:1], v2
@@ -365,13 +361,13 @@ define amdgpu_kernel void @s_fneg_v2bf16(ptr addrspace(1) %out, <2 x bfloat> %in
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -426,16 +422,16 @@ define amdgpu_kernel void @s_fneg_v2bf16_nonload(ptr addrspace(1) %out) #0 {
; CI-NEXT: ; def s2
; CI-NEXT: ;;#ASMEND
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -501,13 +497,11 @@ define amdgpu_kernel void @v_fneg_v2bf16(ptr addrspace(1) %out, ptr addrspace(1)
; CI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_dword v2, v[0:1]
+; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v3, -1.0, v3
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
+; CI-NEXT: v_bfi_b32 v2, s0, v3, v2
+; CI-NEXT: v_add_i32_e32 v2, vcc, 0x80000000, v2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -570,13 +564,13 @@ define amdgpu_kernel void @fneg_free_v2bf16(ptr addrspace(1) %out, i32 %in) #0 {
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -637,16 +631,14 @@ define amdgpu_kernel void @v_fneg_fold_v2bf16(ptr addrspace(1) %out, ptr addrspa
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v4, -1.0, v3
-; CI-NEXT: v_mul_f32_e32 v5, -1.0, v2
-; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; CI-NEXT: v_mul_f32_e32 v3, v4, v3
-; CI-NEXT: v_mul_f32_e32 v2, v5, v2
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_mul_f32_e64 v2, -v2, v2
+; CI-NEXT: v_mul_f32_e32 v3, v3, v4
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -912,12 +904,9 @@ define amdgpu_kernel void @v_extract_fneg_no_fold_v2bf16(ptr addrspace(1) %in) #
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_load_dword v0, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
-; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e32 v1, -1.0, v1
-; CI-NEXT: v_mul_f32_e32 v0, -1.0, v0
-; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; CI-NEXT: v_xor_b32_e32 v0, 0x8000, v0
+; CI-NEXT: v_xor_b32_e32 v1, 0x8000, v1
; CI-NEXT: flat_store_short v[0:1], v0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: flat_store_short v[0:1], v1
diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll
index 800ee87b95ca8..8230e47259dd8 100644
--- a/llvm/test/CodeGen/ARM/fp16-promote.ll
+++ b/llvm/test/CodeGen/ARM/fp16-promote.ll
@@ -1572,26 +1572,11 @@ define void @test_fma(ptr %p, ptr %q, ptr %r) #0 {
}
define void @test_fabs(ptr %p) {
-; CHECK-FP16-LABEL: test_fabs:
-; CHECK-FP16: ldrh r1, [r0]
-; CHECK-FP16-NEXT: vmov s0, r1
-; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-FP16-NEXT: vabs.f32 s0, s0
-; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
-; CHECK-FP16-NEXT: vmov r1, s0
-; CHECK-FP16-NEXT: strh r1, [r0]
-; CHECK-FP16-NEXT: bx lr
-;
-; CHECK-LIBCALL-LABEL: test_fabs:
-; CHECK-LIBCALL: .save {r4, lr}
-; CHECK-LIBCALL-NEXT: push {r4, lr}
-; CHECK-LIBCALL-NEXT: mov r4, r0
-; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
-; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
-; CHECK-LIBCALL-NEXT: bic r0, r0, #-2147483648
-; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
-; CHECK-LIBCALL-NEXT: strh r0, [r4]
-; CHECK-LIBCALL-NEXT: pop {r4, pc}
+; CHECK-ALL-LABEL: test_fabs:
+; CHECK-ALL: ldrh r1, [r0]
+; CHECK-ALL-NEXT: bfc r1, #15, #17
+; CHECK-ALL-NEXT: strh r1, [r0]
+; CHECK-ALL-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.fabs.f16(half %a)
store half %r, ptr %p
@@ -2454,26 +2439,11 @@ define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
}
define void @test_fneg(ptr %p1, ptr %p2) #0 {
-; CHECK-FP16-LABEL: test_fneg:
-; CHECK-FP16: ldrh r0, [r0]
-; CHECK-FP16-NEXT: vmov s0, r0
-; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-FP16-NEXT: vneg.f32 s0, s0
-; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
-; CHECK-FP16-NEXT: vmov r0, s0
-; CHECK-FP16-NEXT: strh r0, [r1]
-; CHECK-FP16-NEXT: bx lr
-;
-; CHECK-LIBCALL-LABEL: test_fneg:
-; CHECK-LIBCALL: .save {r4, lr}
-; CHECK-LIBCALL-NEXT: push {r4, lr}
-; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
-; CHECK-LIBCALL-NEXT: mov r4, r1
-; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
-; CHECK-LIBCALL-NEXT: eor r0, r0, #-2147483648
-; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
-; CHECK-LIBCALL-NEXT: strh r0, [r4]
-; CHECK-LIBCALL-NEXT: pop {r4, pc}
+; CHECK-ALL-LABEL: test_fneg:
+; CHECK-ALL: ldrh r0, [r0]
+; CHECK-ALL-NEXT: eor r0, r0, #32768
+; CHECK-ALL-NEXT: strh r0, [r1]
+; CHECK-ALL-NEXT: bx lr
%v = load half, ptr %p1, align 2
%res = fneg half %v
store half %res, ptr %p2, align 2
diff --git a/llvm/test/CodeGen/Generic/half-neg-abs.ll b/llvm/test/CodeGen/Generic/half-neg-abs.ll
new file mode 100644
index 0000000000000..84d75a3ab153e
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/half-neg-abs.ll
@@ -0,0 +1,75 @@
+; Same as `half.ll`, but for `fneg` and `fabs`. Can be merged back into `half.ll` once BPF doesn't have a compiler error.
+
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=arm64ec-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME: BPF has a compiler error
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+
+; Note that arm64ec labels are quoted, hence the `{{"?}}:`.
+
+; Codegen tests don't work the same for graphics targets. Add a dummy directive
+; for filecheck, just make sure we don't crash.
+; NOCRASH: {{.*}}
+
+; fneg and fabs both need to not quieten signalling NaNs, so should not call any conversion functions which do.
+
+define void @test_fneg(ptr %p1, ptr %p2) #0 {
+; ALL-LABEL: test_fneg{{"?}}:
+; CHECK-NOT: __extend
+; CHECK-NOT: __trunc
+; CHECK-NOT: __gnu
+; BAD: __extendhfsf2
+ %v = load half, ptr %p1
+ %res = fneg half %v
+ store half %res, ptr %p2
+ ret void
+}
+
+define void @test_fabs(ptr %p1, ptr %p2) {
+; ALL-LABEL: test_fabs{{"?}}:
+; CHECK-NOT: __extend
+; CHECK-NOT: __trunc
+; CHECK-NOT: __gnu
+; BAD: __extendhfsf2
+ %a = load half, ptr %p1
+ %r = call half @llvm.fabs.f16(half %a)
+ store half %r, ptr %p2
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 84163b52bb98d..a11bce9bae319 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -514,6 +514,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s1, a1, -1
; RV32I-NEXT: and a0, a0, s1
@@ -521,13 +522,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, a0, a1
; RV32I-NEXT: and a0, a0, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: xor a0, s0, a0
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: and a0, s2, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
@@ -536,6 +536,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
@@ -545,6 +546,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addi s1, a1, -1
; RV64I-NEXT: and a0, a0, s1
@@ -552,13 +554,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, a0, a1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui a0, 524288
-; RV64I-NEXT: xor a0, s0, a0
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: and a0, s2, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
@@ -567,6 +568,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
@@ -638,11 +640,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: not a0, a0
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: slli s1, s1, 17
; RV32I-NEXT: and a0, a0, a1
@@ -677,11 +675,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: not a0, a0
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: slli s1, s1, 49
; RV64I-NEXT: and a0, a0, a1
@@ -804,15 +798,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: slli s0, a0, 17
+; RV32I-NEXT: srli s0, s0, 17
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: slli a0, a0, 1
-; RV32I-NEXT: srli a0, a0, 1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv a1, s0
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -841,15 +834,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: slli s0, a0, 49
+; RV64I-NEXT: srli s0, s0, 49
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: slli a0, a0, 33
-; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv a1, s0
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1217,25 +1209,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a2, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a2, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s1
@@ -1261,25 +1249,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a2, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a2, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s1
@@ -1355,43 +1339,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
@@ -1413,43 +1388,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: lui a1, 16
+; RV64I-NEXT: addi s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
-; RV64I-NEXT: mv a0, s2
+; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
@@ -1535,44 +1501,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a0, 16
+; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a1, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1593,44 +1550,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a0, 16
+; RV64I-NEXT: addi s3, a0, -1
; RV64I-NEXT: and a0, a1, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: mv a1, s2
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -1960,25 +1908,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi s3, a1, -1
-; RV32I-NEXT: and a0, a0, s3
+; RV32I-NEXT: addi s2, a1, -1
+; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
@@ -2003,25 +1947,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addi s3, a1, -1
-; RV64I-NEXT: and a0, a0, s3
+; RV64I-NEXT: addi s2, a1, -1
+; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
@@ -2096,25 +2036,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
@@ -2140,25 +2076,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a1, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
@@ -2519,12 +2451,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s1, a0, a1
; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
@@ -2580,12 +2508,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s1, a0, a1
; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
diff --git a/llvm/test/CodeGen/Thumb2/mve-vabd.ll b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
index 8d52fe52d9360..3c35a29c0a84c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
@@ -63,34 +63,30 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-MVE-NEXT: mov r4, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q1[0]
+; CHECK-MVE-NEXT: vmov.u16 r0, q1[1]
; CHECK-MVE-NEXT: vmov q5, q1
; CHECK-MVE-NEXT: vmov q4, q0
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r5, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q4[0]
+; CHECK-MVE-NEXT: vmov.u16 r0, q4[1]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
; CHECK-MVE-NEXT: mov r5, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q5[1]
+; CHECK-MVE-NEXT: vmov.u16 r0, q5[0]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r6, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q4[1]
+; CHECK-MVE-NEXT: vmov.u16 r0, q4[0]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r1, r6
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: vmov.16 q6[0], r5
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: vmov.16 q6[1], r0
+; CHECK-MVE-NEXT: bfc r0, #15, #17
+; CHECK-MVE-NEXT: bfc r5, #15, #17
+; CHECK-MVE-NEXT: vmov.16 q6[0], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[2]
+; CHECK-MVE-NEXT: vmov.16 q6[1], r5
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r5, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q4[2]
@@ -98,9 +94,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[2], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[3]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -110,9 +104,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[3], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[4]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -122,9 +114,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[4], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[5]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -134,9 +124,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[5], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[6]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -146,9 +134,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[6], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[7]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -158,9 +144,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[7], r0
; CHECK-MVE-NEXT: vstrw.32 q6, [r4]
; CHECK-MVE-NEXT: vpop {d8, d9, d10, d11, d12, d13}
More information about the llvm-commits
mailing list