[clang] [llvm] [ARM] enable FENV_ACCESS pragma support for hard-float targets (PR #137101)

Erik Enikeev via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 27 17:11:51 PST 2025


https://github.com/Varnike updated https://github.com/llvm/llvm-project/pull/137101

From 3609937e9d171528fd75c24c12ebd6c5f4ab01e1 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Mon, 9 Jun 2025 12:08:15 -0400
Subject: [PATCH 1/5] [ARM] Enable strict fp support

---
 clang/lib/Basic/Targets/ARM.cpp    | 2 ++
 clang/test/Parser/pragma-fp-warn.c | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 394b50b9ee222..f393a42b190db 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -367,6 +367,8 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
                            : "\01mcount";
 
   SoftFloatABI = llvm::is_contained(Opts.FeaturesAsWritten, "+soft-float-abi");
+  if (!SoftFloatABI)
+    HasStrictFP = true;
 }
 
 StringRef ARMTargetInfo::getABI() const { return ABI; }
diff --git a/clang/test/Parser/pragma-fp-warn.c b/clang/test/Parser/pragma-fp-warn.c
index c52bd4e4805ab..f743cb87997dc 100644
--- a/clang/test/Parser/pragma-fp-warn.c
+++ b/clang/test/Parser/pragma-fp-warn.c
@@ -1,6 +1,6 @@
 
 // RUN: %clang_cc1 -triple wasm32 -fsyntax-only -Wno-unknown-pragmas -Wignored-pragmas -verify %s
-// RUN: %clang_cc1 -triple thumbv7 -fsyntax-only -Wno-unknown-pragmas -Wignored-pragmas -verify %s
+// RUN: %clang_cc1 -triple thumbv7 -fsyntax-only -target-feature +soft-float-abi  -Wno-unknown-pragmas -Wignored-pragmas -verify %s
 // RUN: %clang_cc1 -DEXPOK -triple aarch64 -fsyntax-only -Wno-unknown-pragmas -Wignored-pragmas -verify %s
 // RUN: %clang_cc1 -DEXPOK -triple x86_64 -fsyntax-only -Wno-unknown-pragmas -Wignored-pragmas -verify %s
 // RUN: %clang_cc1 -DEXPOK -triple systemz -fsyntax-only -Wno-unknown-pragmas -Wignored-pragmas -verify %s
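
To make the effect of PATCH 1 concrete: with HasStrictFP set for hard-float
ARM targets, clang now honors the FENV_ACCESS pragma instead of emitting the
-Wignored-pragmas diagnostic that pragma-fp-warn.c checks (the updated RUN
line keeps exercising that diagnostic for the soft-float ABI). A minimal C
sketch of code that now compiles as intended; the function name and body are
illustrative only, not taken from the patch:

    #include <fenv.h>
    #pragma STDC FENV_ACCESS ON

    /* Illustrative only: under the pragma, FP operations in this scope
       are lowered through the llvm.experimental.constrained.* intrinsics
       exercised by the tests below, so the FP environment is observable. */
    float add_and_check(float a, float b) {
      feclearexcept(FE_ALL_EXCEPT);
      float r = a + b;               /* becomes a constrained fadd */
      if (fetestexcept(FE_OVERFLOW)) /* exception flags are now meaningful */
        return 0.0f;
      return r;
    }
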

From 60ea03bc5ffa746aa500d27ea1acaf6111fe7d09 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Fri, 14 Nov 2025 03:52:39 +0300
Subject: [PATCH 2/5] Add tests for strict fp vector intrinsics

---
 llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll | 1879 +++++++++++++++++
 1 file changed, 1879 insertions(+)
 create mode 100644 llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll

diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
new file mode 100644
index 0000000000000..cb41a5cfb8250
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
@@ -0,0 +1,1879 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-a55 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK
+
+define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: add_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vadd.f32 s11, s3, s7
+; CHECK-NEXT:    vadd.f32 s10, s2, s6
+; CHECK-NEXT:    vadd.f32 s9, s1, s5
+; CHECK-NEXT:    vadd.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: sub_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vsub.f32 s11, s3, s7
+; CHECK-NEXT:    vsub.f32 s10, s2, s6
+; CHECK-NEXT:    vsub.f32 s9, s1, s5
+; CHECK-NEXT:    vsub.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: mul_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vmul.f32 s11, s3, s7
+; CHECK-NEXT:    vmul.f32 s10, s2, s6
+; CHECK-NEXT:    vmul.f32 s9, s1, s5
+; CHECK-NEXT:    vmul.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: div_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vdiv.f32 s11, s3, s7
+; CHECK-NEXT:    vdiv.f32 s10, s2, s6
+; CHECK-NEXT:    vdiv.f32 s9, s1, s5
+; CHECK-NEXT:    vdiv.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
+; CHECK-LABEL: fma_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    add lr, sp, #8
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    vld1.64 {d4, d5}, [lr]
+; CHECK-NEXT:    vld1.64 {d0, d1}, [r12]
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    vfma.f32 s3, s7, s11
+; CHECK-NEXT:    vfma.f32 s2, s6, s10
+; CHECK-NEXT:    vfma.f32 s1, s5, s9
+; CHECK-NEXT:    vfma.f32 s0, s4, s8
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptosi_v4i32_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    mov r4, r2
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x i32> %val
+}
+
+define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptoui_v4i32_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vldr s4, .LCPI6_0
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    mov r7, #-2147483648
+; CHECK-NEXT:    mov r6, #-2147483648
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    mov r5, #-2147483648
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vldr s6, .LCPI6_1
+; CHECK-NEXT:    mov r4, #-2147483648
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    vcmpe.f32 s3, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s2, s4
+; CHECK-NEXT:    movwlt r7, #0
+; CHECK-NEXT:    movwlt r1, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s1, s4
+; CHECK-NEXT:    movwlt r6, #0
+; CHECK-NEXT:    movwlt r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s0, s4
+; CHECK-NEXT:    movwlt r5, #0
+; CHECK-NEXT:    movwlt r3, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movwlt r0, #1
+; CHECK-NEXT:    movwlt r4, #0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f32 s8, s4, s6
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    vsub.f32 s8, s0, s8
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vseleq.f32 s8, s4, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vseleq.f32 s10, s4, s6
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    vsub.f32 s16, s1, s8
+; CHECK-NEXT:    vseleq.f32 s4, s4, s6
+; CHECK-NEXT:    vsub.f32 s18, s2, s10
+; CHECK-NEXT:    vsub.f32 s20, s3, s4
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    eor r4, r0, r4
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    eor r5, r0, r5
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    eor r6, r0, r6
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    bl __aeabi_f2iz
+; CHECK-NEXT:    eor r3, r0, r7
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0x4f000000 @ float 2.14748365E+9
+; CHECK-NEXT:  .LCPI6_1:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+  %val = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x i32> %val
+}
+
+define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptosi_v4i64_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov d8, r2, r3
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vldr d8, [sp, #56]
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov.32 d11[0], r0
+; CHECK-NEXT:    vmov.32 d10[0], r5
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmov.32 d11[1], r7
+; CHECK-NEXT:    vmov.32 d9[0], r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    vmov.32 d10[1], r6
+; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vmov.32 d8[0], r0
+; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT:    vmov.32 d9[1], r5
+; CHECK-NEXT:    vmov.32 d8[1], r1
+; CHECK-NEXT:    vst1.64 {d8, d9}, [r4:128]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x i64> %val
+}
+
+define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: fptoui_v4i64_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov d8, r2, r3
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vldr d8, [sp, #56]
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov.32 d11[0], r0
+; CHECK-NEXT:    vmov.32 d10[0], r5
+; CHECK-NEXT:    vmov r1, s17
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vmov.32 d11[1], r7
+; CHECK-NEXT:    vmov.32 d9[0], r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    vmov.32 d10[1], r6
+; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vmov.32 d8[0], r0
+; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT:    vmov.32 d9[1], r5
+; CHECK-NEXT:    vmov.32 d8[1], r1
+; CHECK-NEXT:    vst1.64 {d8, d9}, [r4:128]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x i64> %val
+}
+
+define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v4f32_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    movw r6, #0
+; CHECK-NEXT:    eor r1, r1, #-2147483648
+; CHECK-NEXT:    vldr d8, .LCPI9_0
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    movt r6, #17200
+; CHECK-NEXT:    str r1, [sp, #8]
+; CHECK-NEXT:    eor r1, r2, #-2147483648
+; CHECK-NEXT:    str r6, [sp, #12]
+; CHECK-NEXT:    vldr d16, [sp, #8]
+; CHECK-NEXT:    str r1, [sp, #16]
+; CHECK-NEXT:    eor r1, r3, #-2147483648
+; CHECK-NEXT:    str r6, [sp, #20]
+; CHECK-NEXT:    str r6, [sp, #28]
+; CHECK-NEXT:    str r1, [sp, #24]
+; CHECK-NEXT:    str r0, [sp]
+; CHECK-NEXT:    str r6, [sp, #4]
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp, #16]
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp, #24]
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT:    .long 1127219200
+  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v4f32_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    movw r6, #0
+; CHECK-NEXT:    str r1, [sp, #8]
+; CHECK-NEXT:    vldr d8, .LCPI10_0
+; CHECK-NEXT:    movt r6, #17200
+; CHECK-NEXT:    str r6, [sp, #12]
+; CHECK-NEXT:    vldr d16, [sp, #8]
+; CHECK-NEXT:    str r6, [sp, #20]
+; CHECK-NEXT:    str r2, [sp, #16]
+; CHECK-NEXT:    str r6, [sp, #28]
+; CHECK-NEXT:    str r3, [sp, #24]
+; CHECK-NEXT:    stm sp, {r0, r6}
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp, #16]
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp, #24]
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vsub.f64 d16, d16, d8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI10_0:
+; CHECK-NEXT:    .long 0 @ double 4503599627370496
+; CHECK-NEXT:    .long 1127219200
+  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v4f32_v4i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    mov r6, r2
+; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v4f32_v4i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r3
+; CHECK-NEXT:    mov r6, r2
+; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: sqrt_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vsqrt.f32 s7, s3
+; CHECK-NEXT:    vsqrt.f32 s6, s2
+; CHECK-NEXT:    vsqrt.f32 s5, s1
+; CHECK-NEXT:    vsqrt.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
+  %val = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: rint_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: nearbyint_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: maxnum_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    add r0, sp, #48
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    mov r2, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    vmov r7, r1, d8
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmov r1, r6, d9
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: minnum_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r9, r0
+; CHECK-NEXT:    add r0, sp, #48
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    mov r2, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    vmov r7, r1, d8
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    vmov r1, r6, d9
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r9
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r1, r8
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: ceil_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: floor_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @round_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: round_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: roundeven_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
+; CHECK-LABEL: trunc_v4f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  ret <4 x float> %val
+}
+
+define <4 x i1> @fcmp_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: fcmp_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    vcmp.f32 s1, s5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s3, s7
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    movweq r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    movweq r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    vmov.32 d17[0], r1
+; CHECK-NEXT:    vmov.32 d16[0], r3
+; CHECK-NEXT:    vmov.32 d17[1], r2
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+  ret <4 x i1> %val
+}
+
+define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
+; CHECK-LABEL: fcmps_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    vcmpe.f32 s1, s5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s3, s7
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s0, s4
+; CHECK-NEXT:    movweq r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s2, s6
+; CHECK-NEXT:    movweq r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    vmov.32 d17[0], r1
+; CHECK-NEXT:    vmov.32 d16[0], r3
+; CHECK-NEXT:    vmov.32 d17[1], r2
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+  ret <4 x i1> %val
+}
+
+
+
+define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: add_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vadd.f64 d18, d18, d16
+; CHECK-NEXT:    vadd.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: sub_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vsub.f64 d18, d18, d16
+; CHECK-NEXT:    vsub.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: mul_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmul.f64 d18, d18, d16
+; CHECK-NEXT:    vmul.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: div_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vdiv.f64 d18, d18, d16
+; CHECK-NEXT:    vdiv.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
+; CHECK-LABEL: fma_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    add lr, sp, #8
+; CHECK-NEXT:    vmov d20, r0, r1
+; CHECK-NEXT:    vmov d21, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [lr]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vfma.f64 d18, d20, d16
+; CHECK-NEXT:    vfma.f64 d19, d21, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v2i32_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+  %val = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x i32> %val
+}
+
+define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v2i32_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vldr d16, .LCPI31_0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    mov r5, #-2147483648
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r4, #-2147483648
+; CHECK-NEXT:    vmov.i32 d19, #0x0
+; CHECK-NEXT:    vcmpe.f64 d17, d16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f64 d18, d16
+; CHECK-NEXT:    movwlt r5, #0
+; CHECK-NEXT:    movwlt r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movwlt r0, #1
+; CHECK-NEXT:    movwlt r4, #0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f64 d20, d16, d19
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vsub.f64 d18, d18, d20
+; CHECK-NEXT:    vseleq.f64 d16, d16, d19
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vsub.f64 d8, d17, d16
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    eor r4, r0, r4
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    eor r0, r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r11, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 0 @ double 2147483648
+; CHECK-NEXT:    .long 1105199104
+  %val = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x i32> %val
+}
+
+define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v2i64_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x i64> %val
+}
+
+define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v2i64_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x i64> %val
+}
+
+define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v2f64_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    movw r2, #0
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    movt r2, #17200
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    str r2, [sp, #12]
+; CHECK-NEXT:    str r0, [sp, #8]
+; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    str r2, [sp, #4]
+; CHECK-NEXT:    vldr d16, .LCPI34_0
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    str r0, [sp]
+; CHECK-NEXT:    vldr d17, [sp, #8]
+; CHECK-NEXT:    vldr d18, [sp]
+; CHECK-NEXT:    vsub.f64 d17, d17, d16
+; CHECK-NEXT:    vsub.f64 d16, d18, d16
+; CHECK-NEXT:    vmov r0, r1, d17
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI34_0:
+; CHECK-NEXT:    .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT:    .long 1127219200
+  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v2f64_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    add r2, sp, #8
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vst1.32 {d16[0]}, [r2:32]
+; CHECK-NEXT:    vst1.32 {d16[1]}, [r0:32]
+; CHECK-NEXT:    movw r0, #0
+; CHECK-NEXT:    vldr d16, .LCPI35_0
+; CHECK-NEXT:    movt r0, #17200
+; CHECK-NEXT:    str r0, [sp, #12]
+; CHECK-NEXT:    str r0, [sp, #4]
+; CHECK-NEXT:    vldr d17, [sp, #8]
+; CHECK-NEXT:    vldr d18, [sp]
+; CHECK-NEXT:    vsub.f64 d17, d17, d16
+; CHECK-NEXT:    vsub.f64 d16, d18, d16
+; CHECK-NEXT:    vmov r0, r1, d17
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI35_0:
+; CHECK-NEXT:    .long 0 @ double 4503599627370496
+; CHECK-NEXT:    .long 1127219200
+  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v2f64_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @uitofp_v2f64_v2i64(<2 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v2f64_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: sqrt_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vsqrt.f64 d16, d16
+; CHECK-NEXT:    vsqrt.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %val = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: rint_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: nearbyint_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: maxnum_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    mov r4, r2
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    vmov r2, r3, d9
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vmov r2, r3, d8
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: minnum_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    mov r4, r2
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    vmov r2, r3, d9
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    vmov r2, r3, d8
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: ceil_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: floor_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @round_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: round_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: roundeven_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: trunc_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+define <2 x i1> @fcmp_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: fcmp_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vcmp.f64 d18, d17
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d18, d16
+; CHECK-NEXT:    movweq r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r2, #1
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+  ret <2 x i1> %val
+}
+
+define <2 x i1> @fcmps_v2f64(<2 x double> %x, <2 x double> %y) #0 {
+; CHECK-LABEL: fcmps_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vcmpe.f64 d18, d17
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f64 d18, d16
+; CHECK-NEXT:    movweq r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r2, #1
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
+  ret <2 x i1> %val
+}
+
+
+
+define <1 x double> @add_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: add_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vadd.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @sub_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: sub_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsub.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @mul_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: mul_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmul.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @div_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: div_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vdiv.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z) #0 {
+; CHECK-LABEL: fma_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vfma.f64 d16, d18, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v1i32_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x i32> %val
+}
+
+define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v1i32_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vldr d16, .LCPI56_0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r4, #-2147483648
+; CHECK-NEXT:    vmov.i32 d18, #0x0
+; CHECK-NEXT:    vcmpe.f64 d17, d16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movwlt r0, #1
+; CHECK-NEXT:    movwlt r4, #0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vseleq.f64 d16, d16, d18
+; CHECK-NEXT:    vsub.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __aeabi_d2iz
+; CHECK-NEXT:    eor r0, r0, r4
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI56_0:
+; CHECK-NEXT:    .long 0 @ double 2147483648
+; CHECK-NEXT:    .long 1105199104
+  %val = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x i32> %val
+}
+
+define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptosi_v1i64_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x i64> %val
+}
+
+define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: fptoui_v1i64_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x i64> %val
+}
+
+define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
+; CHECK-LABEL: sitofp_v1f64_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    movw r1, #0
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    vldr d16, .LCPI59_0
+; CHECK-NEXT:    movt r1, #17200
+; CHECK-NEXT:    str r0, [sp]
+; CHECK-NEXT:    str r1, [sp, #4]
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vsub.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    add sp, sp, #8
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI59_0:
+; CHECK-NEXT:    .long 2147483648 @ double 4503601774854144
+; CHECK-NEXT:    .long 1127219200
+  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
+; CHECK-LABEL: uitofp_v1f64_v1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    movw r1, #0
+; CHECK-NEXT:    vldr d16, .LCPI60_0
+; CHECK-NEXT:    movt r1, #17200
+; CHECK-NEXT:    stm sp, {r0, r1}
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vsub.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    add sp, sp, #8
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI60_0:
+; CHECK-NEXT:    .long 0 @ double 4503599627370496
+; CHECK-NEXT:    .long 1127219200
+  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
+; CHECK-LABEL: sitofp_v1f64_v1i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @uitofp_v1f64_v1i64(<1 x i64> %x) #0 {
+; CHECK-LABEL: uitofp_v1f64_v1i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: sqrt_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vsqrt.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %val = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: rint_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: nearbyint_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: maxnum_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: minnum_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: ceil_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: floor_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @round_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: round_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: roundeven_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
+; CHECK-LABEL: trunc_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    pop {r11, pc}
+  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  ret <1 x double> %val
+}
+
+define <1 x i1> @fcmp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: fcmp_v1f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    vcmp.f64 d17, d16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret <1 x i1> %val
+}
+
+define <1 x i1> @fcmps_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+; CHECK-LABEL: fcmps_v1f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    vcmpe.f64 d17, d16
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    bx lr
+entry:
+  %val = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict") #0
+  ret <1 x i1> %val
+}
+
+define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
+; CHECK-LABEL: fptrunc_v2f32_v2f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_d2f
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+  %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  ret <2 x float> %val
+}
+
+define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
+; CHECK-LABEL: fpext_v2f64_v2f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vmov d8, r0, r1
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    bl __aeabi_f2d
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    bl __aeabi_f2d
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    pop {r4, r5, r11, pc}
+  %val = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %x, metadata !"fpexcept.strict") #0
+  ret <2 x double> %val
+}
+
+attributes #0 = { strictfp }
+
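+; Declarations for the constrained intrinsics exercised above.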
+declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
+declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
+declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+
+declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
+declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
+declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
+declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+
+declare <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double>, <1 x double>, <1 x double>, metadata, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
+declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
+declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double>, <1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double>, <1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
+declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
+declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)
+
+declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
+

>From 4cbb7b5dd28ee5e0a28f00c3afa9eacf9a21d3ad Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Tue, 25 Nov 2025 19:30:21 +0300
Subject: [PATCH 3/5] [ARM] Disable strict node mutation and use correct
 lowering for several strict ops
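
Setting IsStrictFPEnabled tells SelectionDAG not to mutate STRICT_* nodes
into their non-strict counterparts, so operation actions for the strict
opcodes are now declared explicitly. As an illustration (a hand-written
sketch, not taken from the tests in this patch), a constrained conversion
such as

  define i32 @f(double %d) strictfp {
    %r = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %d, metadata !"fpexcept.strict") strictfp
    ret i32 %r
  }

should now select vcvt.s32.f64 directly on hard-float targets instead of
calling the __aeabi_d2iz libcall.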

---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  57 +-
 llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll | 952 ++++++------------
 llvm/test/CodeGen/ARM/fp16-fullfp16.ll        |   8 +-
 3 files changed, 331 insertions(+), 686 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2ad8f877ff11b..6bd40c753d49d 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -614,16 +614,30 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
       for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                       ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
         setOperationAction(Op, MVT::f64, Legal);
+
+      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
+      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
     }
   }
 
   if (Subtarget->hasFullFP16()) {
+    for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
+                    ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
+      setOperationAction(Op, MVT::f16, Legal);
+
     addRegisterClass(MVT::f16, &ARM::HPRRegClass);
     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
     setOperationAction(ISD::BITCAST, MVT::f16, Custom);
 
     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
+    setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Legal);
+    setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Legal);
+
+    if (Subtarget->hasVFPv3()) {
+      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
+      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
+    }
   }
 
   if (Subtarget->hasBF16()) {
@@ -933,13 +947,14 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
     setOperationAction(ISD::STRICT_FP_ROUND,   MVT::f32, Custom);
   }
 
+  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
+  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
+
   if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
@@ -1316,34 +1331,26 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
 
   // FP-ARMv8 implements a lot of rounding-like FP operations.
-  if (Subtarget->hasFPARMv8Base()) {
-    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
-    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
-    setOperationAction(ISD::FROUND, MVT::f32, Legal);
-    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
-    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
-    setOperationAction(ISD::FRINT, MVT::f32, Legal);
-    setOperationAction(ISD::FROUNDEVEN, MVT::f32, Legal);
-    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
-    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
+  if (Subtarget->hasFPARMv8Base()) {
+    for (auto Op :
+         {ISD::FFLOOR,            ISD::FCEIL,             ISD::FROUND,
+          ISD::FTRUNC,            ISD::FNEARBYINT,        ISD::FRINT,
+          ISD::FROUNDEVEN,        ISD::FMINNUM,           ISD::FMAXNUM,
+          ISD::STRICT_FFLOOR,     ISD::STRICT_FCEIL,      ISD::STRICT_FROUND,
+          ISD::STRICT_FTRUNC,     ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT,
+          ISD::STRICT_FROUNDEVEN, ISD::STRICT_FMINNUM,    ISD::STRICT_FMAXNUM}) {
+      setOperationAction(Op, MVT::f32, Legal);
+
+      if (Subtarget->hasFP64())
+        setOperationAction(Op, MVT::f64, Legal);
+    }
+
     if (Subtarget->hasNEON()) {
       setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
       setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
       setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
       setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
     }
-
-    if (Subtarget->hasFP64()) {
-      setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
-      setOperationAction(ISD::FCEIL, MVT::f64, Legal);
-      setOperationAction(ISD::FROUND, MVT::f64, Legal);
-      setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
-      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
-      setOperationAction(ISD::FRINT, MVT::f64, Legal);
-      setOperationAction(ISD::FROUNDEVEN, MVT::f64, Legal);
-      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
-      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
-    }
   }
 
   // FP16 often need to be promoted to call lib functions
@@ -1498,6 +1505,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
       Align(1ULL << Subtarget->getPreferBranchLogAlignment()));
 
   setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
+
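+  // Declare full strict-FP support so SelectionDAG keeps STRICT_* nodes
+  // instead of mutating them into their non-strict equivalents.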
+  IsStrictFPEnabled = true;
 }
 
 bool ARMTargetLowering::useSoftFloat() const {
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
index cb41a5cfb8250..c767579cf4194 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
@@ -98,26 +98,17 @@ define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0
 define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fptosi_v4i32_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r3
-; CHECK-NEXT:    mov r4, r2
-; CHECK-NEXT:    mov r5, r1
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    mov r2, r4
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vcvt.s32.f32 s8, s2
+; CHECK-NEXT:    vcvt.s32.f32 s4, s0
+; CHECK-NEXT:    vcvt.s32.f32 s6, s1
+; CHECK-NEXT:    vcvt.s32.f32 s0, s3
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    bx lr
   %val = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x i32> %val
 }
@@ -125,73 +116,17 @@ define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
 define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fptoui_v4i32_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10}
-; CHECK-NEXT:    vpush {d8, d9, d10}
-; CHECK-NEXT:    vldr s4, .LCPI6_0
 ; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    mov r7, #-2147483648
-; CHECK-NEXT:    mov r6, #-2147483648
-; CHECK-NEXT:    mov r2, #0
-; CHECK-NEXT:    mov r5, #-2147483648
-; CHECK-NEXT:    mov r3, #0
-; CHECK-NEXT:    vldr s6, .LCPI6_1
-; CHECK-NEXT:    mov r4, #-2147483648
 ; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    mov r1, #0
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    vcmpe.f32 s3, s4
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f32 s2, s4
-; CHECK-NEXT:    movwlt r7, #0
-; CHECK-NEXT:    movwlt r1, #1
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f32 s1, s4
-; CHECK-NEXT:    movwlt r6, #0
-; CHECK-NEXT:    movwlt r2, #1
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f32 s0, s4
-; CHECK-NEXT:    movwlt r5, #0
-; CHECK-NEXT:    movwlt r3, #1
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movwlt r0, #1
-; CHECK-NEXT:    movwlt r4, #0
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    vseleq.f32 s8, s4, s6
-; CHECK-NEXT:    cmp r3, #0
-; CHECK-NEXT:    vsub.f32 s8, s0, s8
-; CHECK-NEXT:    vmov r0, s8
-; CHECK-NEXT:    vseleq.f32 s8, s4, s6
-; CHECK-NEXT:    cmp r2, #0
-; CHECK-NEXT:    vseleq.f32 s10, s4, s6
-; CHECK-NEXT:    cmp r1, #0
-; CHECK-NEXT:    vsub.f32 s16, s1, s8
-; CHECK-NEXT:    vseleq.f32 s4, s4, s6
-; CHECK-NEXT:    vsub.f32 s18, s2, s10
-; CHECK-NEXT:    vsub.f32 s20, s3, s4
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    eor r4, r0, r4
-; CHECK-NEXT:    vmov r0, s16
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    eor r5, r0, r5
-; CHECK-NEXT:    vmov r0, s18
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    eor r6, r0, r6
-; CHECK-NEXT:    vmov r0, s20
-; CHECK-NEXT:    bl __aeabi_f2iz
-; CHECK-NEXT:    eor r3, r0, r7
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    vpop {d8, d9, d10}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
-; CHECK-NEXT:    .p2align 2
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI6_0:
-; CHECK-NEXT:    .long 0x4f000000 @ float 2.14748365E+9
-; CHECK-NEXT:  .LCPI6_1:
-; CHECK-NEXT:    .long 0x00000000 @ float 0
+; CHECK-NEXT:    vcvt.u32.f32 s8, s2
+; CHECK-NEXT:    vcvt.u32.f32 s4, s0
+; CHECK-NEXT:    vcvt.u32.f32 s6, s1
+; CHECK-NEXT:    vcvt.u32.f32 s0, s3
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    bx lr
   %val = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x i32> %val
 }
@@ -279,52 +214,39 @@ define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
 define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 ; CHECK-LABEL: sitofp_v4f32_v4i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    .vsave {d8}
-; CHECK-NEXT:    vpush {d8}
 ; CHECK-NEXT:    .pad #32
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    movw r6, #0
+; CHECK-NEXT:    movw r12, #0
+; CHECK-NEXT:    eor r3, r3, #-2147483648
+; CHECK-NEXT:    eor r2, r2, #-2147483648
 ; CHECK-NEXT:    eor r1, r1, #-2147483648
-; CHECK-NEXT:    vldr d8, .LCPI9_0
 ; CHECK-NEXT:    eor r0, r0, #-2147483648
-; CHECK-NEXT:    movt r6, #17200
+; CHECK-NEXT:    vldr d16, .LCPI9_0
+; CHECK-NEXT:    movt r12, #17200
+; CHECK-NEXT:    str r3, [sp, #24]
+; CHECK-NEXT:    str r12, [sp, #28]
+; CHECK-NEXT:    str r12, [sp, #20]
+; CHECK-NEXT:    str r2, [sp, #16]
+; CHECK-NEXT:    str r12, [sp, #12]
 ; CHECK-NEXT:    str r1, [sp, #8]
-; CHECK-NEXT:    eor r1, r2, #-2147483648
-; CHECK-NEXT:    str r6, [sp, #12]
-; CHECK-NEXT:    vldr d16, [sp, #8]
-; CHECK-NEXT:    str r1, [sp, #16]
-; CHECK-NEXT:    eor r1, r3, #-2147483648
-; CHECK-NEXT:    str r6, [sp, #20]
-; CHECK-NEXT:    str r6, [sp, #28]
-; CHECK-NEXT:    str r1, [sp, #24]
+; CHECK-NEXT:    str r12, [sp, #4]
 ; CHECK-NEXT:    str r0, [sp]
-; CHECK-NEXT:    str r6, [sp, #4]
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp, #16]
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp, #24]
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vldr d17, [sp, #24]
+; CHECK-NEXT:    vldr d18, [sp, #16]
+; CHECK-NEXT:    vldr d19, [sp, #8]
+; CHECK-NEXT:    vldr d20, [sp]
+; CHECK-NEXT:    vsub.f64 d17, d17, d16
+; CHECK-NEXT:    vsub.f64 d18, d18, d16
+; CHECK-NEXT:    vsub.f64 d19, d19, d16
+; CHECK-NEXT:    vsub.f64 d16, d20, d16
+; CHECK-NEXT:    vcvt.f32.f64 s3, d17
+; CHECK-NEXT:    vcvt.f32.f64 s2, d18
+; CHECK-NEXT:    vcvt.f32.f64 s1, d19
+; CHECK-NEXT:    vcvt.f32.f64 s0, d16
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    add sp, sp, #32
-; CHECK-NEXT:    vpop {d8}
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
 ; CHECK-NEXT:  .LCPI9_0:
@@ -337,47 +259,34 @@ define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 ; CHECK-LABEL: uitofp_v4f32_v4i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    .vsave {d8}
-; CHECK-NEXT:    vpush {d8}
 ; CHECK-NEXT:    .pad #32
 ; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    movw r6, #0
-; CHECK-NEXT:    str r1, [sp, #8]
-; CHECK-NEXT:    vldr d8, .LCPI10_0
-; CHECK-NEXT:    movt r6, #17200
-; CHECK-NEXT:    str r6, [sp, #12]
-; CHECK-NEXT:    vldr d16, [sp, #8]
-; CHECK-NEXT:    str r6, [sp, #20]
-; CHECK-NEXT:    str r2, [sp, #16]
-; CHECK-NEXT:    str r6, [sp, #28]
+; CHECK-NEXT:    movw r12, #0
 ; CHECK-NEXT:    str r3, [sp, #24]
-; CHECK-NEXT:    stm sp, {r0, r6}
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp, #16]
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp, #24]
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    vsub.f64 d16, d16, d8
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vldr d16, .LCPI10_0
+; CHECK-NEXT:    movt r12, #17200
+; CHECK-NEXT:    str r12, [sp, #28]
+; CHECK-NEXT:    str r12, [sp, #20]
+; CHECK-NEXT:    str r2, [sp, #16]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    str r1, [sp, #8]
+; CHECK-NEXT:    stm sp, {r0, r12}
+; CHECK-NEXT:    vldr d17, [sp, #24]
+; CHECK-NEXT:    vldr d18, [sp, #16]
+; CHECK-NEXT:    vldr d19, [sp, #8]
+; CHECK-NEXT:    vldr d20, [sp]
+; CHECK-NEXT:    vsub.f64 d17, d17, d16
+; CHECK-NEXT:    vsub.f64 d18, d18, d16
+; CHECK-NEXT:    vsub.f64 d19, d19, d16
+; CHECK-NEXT:    vsub.f64 d16, d20, d16
+; CHECK-NEXT:    vcvt.f32.f64 s3, d17
+; CHECK-NEXT:    vcvt.f32.f64 s2, d18
+; CHECK-NEXT:    vcvt.f32.f64 s1, d19
+; CHECK-NEXT:    vcvt.f32.f64 s0, d16
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    add sp, sp, #32
-; CHECK-NEXT:    vpop {d8}
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
 ; CHECK-NEXT:  .LCPI10_0:
@@ -470,26 +379,15 @@ define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
 define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: rint_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl rintf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl rintf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl rintf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl rintf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintx.f32 s7, s3
+; CHECK-NEXT:    vrintx.f32 s6, s2
+; CHECK-NEXT:    vrintx.f32 s5, s1
+; CHECK-NEXT:    vrintx.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -497,26 +395,15 @@ define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
 define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: nearbyint_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl nearbyintf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl nearbyintf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl nearbyintf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl nearbyintf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintr.f32 s7, s3
+; CHECK-NEXT:    vrintr.f32 s6, s2
+; CHECK-NEXT:    vrintr.f32 s5, s1
+; CHECK-NEXT:    vrintr.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -524,36 +411,17 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
 define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: maxnum_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    mov r9, r0
-; CHECK-NEXT:    add r0, sp, #48
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    mov r2, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    vmov r7, r1, d8
-; CHECK-NEXT:    bl fmaxf
-; CHECK-NEXT:    mov r8, r0
-; CHECK-NEXT:    vmov r1, r6, d9
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl fmaxf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    bl fmaxf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r9
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    bl fmaxf
-; CHECK-NEXT:    mov r1, r8
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, pc}
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vmaxnm.f32 s11, s3, s7
+; CHECK-NEXT:    vmaxnm.f32 s10, s2, s6
+; CHECK-NEXT:    vmaxnm.f32 s9, s1, s5
+; CHECK-NEXT:    vmaxnm.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -561,36 +429,17 @@ define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: minnum_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    mov r9, r0
-; CHECK-NEXT:    add r0, sp, #48
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    mov r2, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    vmov r7, r1, d8
-; CHECK-NEXT:    bl fminf
-; CHECK-NEXT:    mov r8, r0
-; CHECK-NEXT:    vmov r1, r6, d9
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl fminf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    bl fminf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r9
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    bl fminf
-; CHECK-NEXT:    mov r1, r8
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, pc}
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vminnm.f32 s11, s3, s7
+; CHECK-NEXT:    vminnm.f32 s10, s2, s6
+; CHECK-NEXT:    vminnm.f32 s9, s1, s5
+; CHECK-NEXT:    vminnm.f32 s8, s0, s4
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -598,26 +447,15 @@ define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: ceil_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl ceilf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl ceilf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl ceilf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl ceilf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintp.f32 s7, s3
+; CHECK-NEXT:    vrintp.f32 s6, s2
+; CHECK-NEXT:    vrintp.f32 s5, s1
+; CHECK-NEXT:    vrintp.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -625,26 +463,15 @@ define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
 define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: floor_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl floorf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl floorf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl floorf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl floorf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintm.f32 s7, s3
+; CHECK-NEXT:    vrintm.f32 s6, s2
+; CHECK-NEXT:    vrintm.f32 s5, s1
+; CHECK-NEXT:    vrintm.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -652,26 +479,15 @@ define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
 define <4 x float> @round_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: round_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl roundf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl roundf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl roundf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl roundf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrinta.f32 s7, s3
+; CHECK-NEXT:    vrinta.f32 s6, s2
+; CHECK-NEXT:    vrinta.f32 s5, s1
+; CHECK-NEXT:    vrinta.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -679,26 +495,15 @@ define <4 x float> @round_v4f32(<4 x float> %x) #0 {
 define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: roundeven_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl roundevenf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl roundevenf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl roundevenf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl roundevenf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintn.f32 s7, s3
+; CHECK-NEXT:    vrintn.f32 s6, s2
+; CHECK-NEXT:    vrintn.f32 s5, s1
+; CHECK-NEXT:    vrintn.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -706,26 +511,15 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
 define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: trunc_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    mov r4, r3
-; CHECK-NEXT:    mov r5, r2
-; CHECK-NEXT:    bl truncf
-; CHECK-NEXT:    mov r7, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    bl truncf
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    bl truncf
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r0, r6
-; CHECK-NEXT:    bl truncf
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    mov r2, r5
-; CHECK-NEXT:    mov r3, r4
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vrintz.f32 s7, s3
+; CHECK-NEXT:    vrintz.f32 s6, s2
+; CHECK-NEXT:    vrintz.f32 s5, s1
+; CHECK-NEXT:    vrintz.f32 s4, s0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -905,19 +699,13 @@ define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z
 define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: fptosi_v2i32_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vcvt.s32.f64 s0, d16
+; CHECK-NEXT:    vcvt.s32.f64 s2, d17
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    bx lr
   %val = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x i32> %val
 }
@@ -925,46 +713,13 @@ define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
 define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: fptoui_v2i32_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r11, lr}
-; CHECK-NEXT:    .vsave {d8}
-; CHECK-NEXT:    vpush {d8}
-; CHECK-NEXT:    vldr d16, .LCPI31_0
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vmov d18, r2, r3
-; CHECK-NEXT:    mov r5, #-2147483648
-; CHECK-NEXT:    mov r2, #0
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    mov r4, #-2147483648
-; CHECK-NEXT:    vmov.i32 d19, #0x0
-; CHECK-NEXT:    vcmpe.f64 d17, d16
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f64 d18, d16
-; CHECK-NEXT:    movwlt r5, #0
-; CHECK-NEXT:    movwlt r2, #1
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movwlt r0, #1
-; CHECK-NEXT:    movwlt r4, #0
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    vseleq.f64 d20, d16, d19
-; CHECK-NEXT:    cmp r2, #0
-; CHECK-NEXT:    vsub.f64 d18, d18, d20
-; CHECK-NEXT:    vseleq.f64 d16, d16, d19
-; CHECK-NEXT:    vmov r0, r1, d18
-; CHECK-NEXT:    vsub.f64 d8, d17, d16
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    eor r4, r0, r4
-; CHECK-NEXT:    vmov r0, r1, d8
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    eor r0, r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    vpop {d8}
-; CHECK-NEXT:    pop {r4, r5, r11, pc}
-; CHECK-NEXT:    .p2align 3
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI31_0:
-; CHECK-NEXT:    .long 0 @ double 2147483648
-; CHECK-NEXT:    .long 1105199104
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vcvt.u32.f64 s0, d16
+; CHECK-NEXT:    vcvt.u32.f64 s2, d17
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    bx lr
   %val = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x i32> %val
 }
@@ -1140,21 +895,13 @@ define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
 define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: rint_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl rint
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl rint
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintx.f64 d16, d16
+; CHECK-NEXT:    vrintx.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1162,21 +909,13 @@ define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
 define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: nearbyint_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl nearbyint
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl nearbyint
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintr.f64 d16, d16
+; CHECK-NEXT:    vrintr.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1184,30 +923,15 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
 define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: maxnum_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    add r0, sp, #40
-; CHECK-NEXT:    mov r6, r3
-; CHECK-NEXT:    mov r4, r2
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    vmov r2, r3, d9
-; CHECK-NEXT:    bl fmax
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r6, r1
-; CHECK-NEXT:    vmov r2, r3, d8
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    bl fmax
-; CHECK-NEXT:    mov r2, r4
-; CHECK-NEXT:    mov r3, r6
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmaxnm.f64 d18, d18, d16
+; CHECK-NEXT:    vmaxnm.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1215,30 +939,15 @@ define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: minnum_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    add r0, sp, #40
-; CHECK-NEXT:    mov r6, r3
-; CHECK-NEXT:    mov r4, r2
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    vmov r2, r3, d9
-; CHECK-NEXT:    bl fmin
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    mov r6, r1
-; CHECK-NEXT:    vmov r2, r3, d8
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r7
-; CHECK-NEXT:    bl fmin
-; CHECK-NEXT:    mov r2, r4
-; CHECK-NEXT:    mov r3, r6
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vminnm.f64 d18, d18, d16
+; CHECK-NEXT:    vminnm.f64 d16, d19, d17
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1246,21 +955,13 @@ define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: ceil_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl ceil
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl ceil
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintp.f64 d16, d16
+; CHECK-NEXT:    vrintp.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1268,21 +969,13 @@ define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
 define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: floor_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl floor
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl floor
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintm.f64 d16, d16
+; CHECK-NEXT:    vrintm.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1290,21 +983,13 @@ define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
 define <2 x double> @round_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: round_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl round
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl round
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrinta.f64 d16, d16
+; CHECK-NEXT:    vrinta.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1312,21 +997,13 @@ define <2 x double> @round_v2f64(<2 x double> %x) #0 {
 define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: roundeven_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl roundeven
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl roundeven
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintn.f64 d16, d16
+; CHECK-NEXT:    vrintn.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1334,21 +1011,13 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
 define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: trunc_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl trunc
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl trunc
-; CHECK-NEXT:    mov r2, r6
-; CHECK-NEXT:    mov r3, r7
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vrintz.f64 d16, d16
+; CHECK-NEXT:    vrintz.f64 d17, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1473,10 +1142,10 @@ define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z
 define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptosi_v1i32_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vcvt.s32.f64 s0, d16
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
   %val = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i32> %val
 }
@@ -1484,29 +1153,10 @@ define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
 define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptoui_v1i32_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    vldr d16, .LCPI56_0
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    mov r4, #-2147483648
-; CHECK-NEXT:    vmov.i32 d18, #0x0
-; CHECK-NEXT:    vcmpe.f64 d17, d16
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movwlt r0, #1
-; CHECK-NEXT:    movwlt r4, #0
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    vseleq.f64 d16, d16, d18
-; CHECK-NEXT:    vsub.f64 d16, d17, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __aeabi_d2iz
-; CHECK-NEXT:    eor r0, r0, r4
-; CHECK-NEXT:    pop {r4, pc}
-; CHECK-NEXT:    .p2align 3
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI56_0:
-; CHECK-NEXT:    .long 0 @ double 2147483648
-; CHECK-NEXT:    .long 1105199104
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vcvt.u32.f64 s0, d16
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    bx lr
   %val = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i32> %val
 }
@@ -1623,10 +1273,10 @@ define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
 define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: rint_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl rint
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintx.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1634,10 +1284,10 @@ define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
 define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: nearbyint_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl nearbyint
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintr.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1645,10 +1295,11 @@ define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
 define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: maxnum_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl fmax
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmaxnm.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1656,10 +1307,11 @@ define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: minnum_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl fmin
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vminnm.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1667,10 +1319,10 @@ define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: ceil_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl ceil
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintp.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1678,10 +1330,10 @@ define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
 define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: floor_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl floor
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintm.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1689,10 +1341,10 @@ define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
 define <1 x double> @round_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: round_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl round
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrinta.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1700,10 +1352,10 @@ define <1 x double> @round_v1f64(<1 x double> %x) #0 {
 define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: roundeven_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl roundeven
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintn.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1711,10 +1363,10 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
 define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: trunc_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl trunc
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vrintz.f64 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1754,19 +1406,12 @@ entry:
 define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: fptrunc_v2f32_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    mov r4, r1
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    mov r0, r5
-; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_d2f
-; CHECK-NEXT:    mov r1, r6
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vcvt.f32.f64 s1, d16
+; CHECK-NEXT:    vcvt.f32.f64 s0, d17
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    bx lr
   %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x float> %val
 }
@@ -1774,21 +1419,12 @@ define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
 define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
 ; CHECK-LABEL: fpext_v2f64_v2f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r11, lr}
-; CHECK-NEXT:    .vsave {d8}
-; CHECK-NEXT:    vpush {d8}
-; CHECK-NEXT:    vmov d8, r0, r1
-; CHECK-NEXT:    vmov r0, s17
-; CHECK-NEXT:    bl __aeabi_f2d
-; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    vmov r0, s16
-; CHECK-NEXT:    mov r5, r1
-; CHECK-NEXT:    bl __aeabi_f2d
-; CHECK-NEXT:    mov r2, r4
-; CHECK-NEXT:    mov r3, r5
-; CHECK-NEXT:    vpop {d8}
-; CHECK-NEXT:    pop {r4, r5, r11, pc}
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vcvt.f64.f32 d16, s0
+; CHECK-NEXT:    vcvt.f64.f32 d17, s1
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
diff --git a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
index b4060d5fdb574..7b9474313e5bf 100644
--- a/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
+++ b/llvm/test/CodeGen/ARM/fp16-fullfp16.ll
@@ -675,8 +675,8 @@ define half @frem_f16(half %x, half %y) #0 {
 ; CHECK-LABEL: frem_f16:
 ; CHECK:         .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    bl fmodf
 ; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
 ; CHECK-NEXT:    pop {r11, pc}
@@ -713,7 +713,7 @@ define i32 @fptosi_i32_f16(half %x) #0 {
 
 define i32 @fptoui_i32_f16(half %x) #0 {
 ; CHECK-LABEL: fptoui_i32_f16:
-; CHECK:         vcvt.s32.f16 s0, s0
+; CHECK:         vcvt.u32.f16 s0, s0
 ; CHECK-NEXT:    vmov r0, s0
 ; CHECK-NEXT:    bx lr
   %val = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
@@ -925,8 +925,8 @@ define half @atan2_f16(half %x, half %y) #0 {
 ; CHECK-LABEL: atan2_f16:
 ; CHECK:         .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    bl atan2f
 ; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
 ; CHECK-NEXT:    pop {r11, pc}
@@ -974,8 +974,8 @@ define half @pow_f16(half %x, half %y) #0 {
 ; CHECK-LABEL: pow_f16:
 ; CHECK:         .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    vcvtb.f32.f16 s1, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
 ; CHECK-NEXT:    bl powf
 ; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
 ; CHECK-NEXT:    pop {r11, pc}

From 13e4ab91c29e0f5a036360703899fb9c60c6da6a Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Tue, 25 Nov 2025 21:15:26 +0300
Subject: [PATCH 4/5] Post-review fix

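Move the STRICT_FP_EXTEND/STRICT_FP_ROUND legality decisions under the
hasFP16 check, switch the strict fp16 conversion nodes from LibCall to
Expand, and let the VCVTB* ISel patterns match both strict and
non-strict nodes via the any_ pattern fragments. The vector test is
regenerated with a plain armv7a+neon+vfp4 RUN line.

As a reduced illustration (a hypothetical snippet, not taken from the
test suite), a strict half-to-float extension like the following can
now select vcvtb.f32.f16 through the any_ patterns instead of falling
back to a conversion libcall when the fp16 extension is available:

; Hypothetical reduced example, not part of this patch.
define float @strict_fpext_f16(half %h) #0 {
  ; Strict extension: must not be reordered past FP environment accesses.
  %val = call float @llvm.experimental.constrained.fpext.f32.f16(half %h, metadata !"fpexcept.strict") #0
  ret float %val
}

declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)

attributes #0 = { strictfp }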
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |   16 +-
 llvm/lib/Target/ARM/ARMInstrVFP.td            |    8 +-
 llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll | 1118 +++++++++--------
 3 files changed, 598 insertions(+), 544 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 6bd40c753d49d..7628550fad879 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -633,11 +633,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
     setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Legal);
     setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Legal);
-
-    if (Subtarget->hasVFPv3()) {
-      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
-      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
-    }
   }
 
   if (Subtarget->hasBF16()) {
@@ -967,6 +962,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
   if (!Subtarget->hasFP16()) {
     setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
+  } else {
+    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
   }
 
   computeRegisterProperties(Subtarget->getRegisterInfo());
@@ -1306,16 +1304,16 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
     if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
-      setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f64, LibCall);
-      setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f64, LibCall);
+      setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f64, Expand);
+      setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f64, Expand);
     }
 
     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
     if (!Subtarget->hasFP16()) {
       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
-      setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f32, LibCall);
-      setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f32, LibCall);
+      setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f32, Expand);
+      setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f32, Expand);
     }
 
     // Strict floating-point comparisons need custom lowering.
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index 65c61c259d465..5f5f703fbabf1 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -814,7 +814,7 @@ def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
 
 def : FP16Pat<(f32 (any_fpextend (f16 HPR:$Sm))),
               (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
-def : FP16Pat<(f16_to_fp GPR:$a),
+def : FP16Pat<(any_f16_to_fp GPR:$a),
               (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
 
 let hasSideEffects = 0, mayRaiseFPException = 1, Uses = [FPSCR_RM] in
@@ -826,7 +826,7 @@ def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda,
 
 def : FP16Pat<(f16 (any_fpround SPR:$Sm)),
               (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$Sm), HPR)>;
-def : FP16Pat<(fp_to_f16 SPR:$a),
+def : FP16Pat<(any_fp_to_f16 SPR:$a),
               (i32 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$a), GPR))>;
 def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (any_fpround (f32 SPR:$src2))), imm_even:$lane),
               (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
@@ -891,7 +891,7 @@ def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
 def : FullFP16Pat<(f64 (any_fpextend (f16 HPR:$Sm))),
                   (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
-def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
+def : FP16Pat<(f64 (any_f16_to_fp GPR:$a)),
               (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
               Requires<[HasFPARMv8, HasDPVFP]>;
 
@@ -917,7 +917,7 @@ def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
 def : FullFP16Pat<(f16 (any_fpround DPR:$Dm)),
                   (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$Dm), HPR)>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
-def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
+def : FP16Pat<(any_fp_to_f16 (f64 DPR:$a)),
               (i32 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$a), GPR))>,
                    Requires<[HasFPARMv8, HasDPVFP]>;
 
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
index c767579cf4194..dc52ad51c0241 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=arm-none-eabi -mcpu=cortex-a55 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=armv7a-- -mattr=+neon,+vfp4 %s -o - | FileCheck %s
 
 define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: add_v4f32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    vadd.f32 s11, s3, s7
 ; CHECK-NEXT:    vadd.f32 s10, s2, s6
@@ -23,8 +23,8 @@ define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: sub_v4f32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    vsub.f32 s11, s3, s7
 ; CHECK-NEXT:    vsub.f32 s10, s2, s6
@@ -41,8 +41,8 @@ define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: mul_v4f32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    vmul.f32 s11, s3, s7
 ; CHECK-NEXT:    vmul.f32 s10, s2, s6
@@ -59,15 +59,15 @@ define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: div_v4f32:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
+; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    vdiv.f32 s11, s3, s7
 ; CHECK-NEXT:    vdiv.f32 s10, s2, s6
 ; CHECK-NEXT:    vdiv.f32 s9, s1, s5
 ; CHECK-NEXT:    vdiv.f32 s8, s0, s4
-; CHECK-NEXT:    vmov r2, r3, d5
 ; CHECK-NEXT:    vmov r0, r1, d4
+; CHECK-NEXT:    vmov r2, r3, d5
 ; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
@@ -76,18 +76,17 @@ define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
 ; CHECK-LABEL: fma_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    add r12, sp, #24
 ; CHECK-NEXT:    add lr, sp, #8
-; CHECK-NEXT:    vmov d3, r2, r3
-; CHECK-NEXT:    vld1.64 {d4, d5}, [lr]
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    vld1.64 {d2, d3}, [lr]
+; CHECK-NEXT:    vmov d5, r2, r3
+; CHECK-NEXT:    vmov d4, r0, r1
 ; CHECK-NEXT:    vld1.64 {d0, d1}, [r12]
-; CHECK-NEXT:    vmov d2, r0, r1
-; CHECK-NEXT:    vfma.f32 s3, s7, s11
-; CHECK-NEXT:    vfma.f32 s2, s6, s10
-; CHECK-NEXT:    vfma.f32 s1, s5, s9
-; CHECK-NEXT:    vfma.f32 s0, s4, s8
+; CHECK-NEXT:    vfma.f32 s3, s11, s7
+; CHECK-NEXT:    vfma.f32 s2, s10, s6
+; CHECK-NEXT:    vfma.f32 s1, s9, s5
+; CHECK-NEXT:    vfma.f32 s0, s8, s4
 ; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    pop {r11, pc}
@@ -134,39 +133,37 @@ define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
 define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fptosi_v4i64_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vmov d8, r2, r3
+; CHECK-NEXT:    vldr d0, [sp, #56]
 ; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    vmov r0, s16
-; CHECK-NEXT:    bl __aeabi_f2lz
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    vmov r0, s17
-; CHECK-NEXT:    mov r6, r1
-; CHECK-NEXT:    bl __aeabi_f2lz
-; CHECK-NEXT:    vldr d8, [sp, #56]
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    vmov.32 d11[0], r0
-; CHECK-NEXT:    vmov.32 d10[0], r5
-; CHECK-NEXT:    vmov r1, s17
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    bl __aeabi_f2lz
-; CHECK-NEXT:    mov r5, r1
-; CHECK-NEXT:    vmov r1, s16
-; CHECK-NEXT:    vmov.32 d11[1], r7
-; CHECK-NEXT:    vmov.32 d9[0], r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    vmov.32 d10[1], r6
-; CHECK-NEXT:    bl __aeabi_f2lz
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vmov r6, s1
+; CHECK-NEXT:    vmov r7, s0
+; CHECK-NEXT:    bl __fixsfdi
 ; CHECK-NEXT:    vmov.32 d8[0], r0
-; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    bl __fixsfdi
+; CHECK-NEXT:    vmov.32 d9[0], r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    bl __fixsfdi
+; CHECK-NEXT:    vmov.32 d11[0], r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    bl __fixsfdi
+; CHECK-NEXT:    vmov.32 d10[0], r0
+; CHECK-NEXT:    vmov.32 d11[1], r6
+; CHECK-NEXT:    vmov.32 d10[1], r1
 ; CHECK-NEXT:    vmov.32 d9[1], r5
-; CHECK-NEXT:    vmov.32 d8[1], r1
+; CHECK-NEXT:    vmov.32 d8[1], r8
+; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
 ; CHECK-NEXT:    vst1.64 {d8, d9}, [r4:128]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x i64> %val
 }
@@ -174,39 +171,37 @@ define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
 define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: fptoui_v4i64_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vmov d8, r2, r3
+; CHECK-NEXT:    vldr d0, [sp, #56]
 ; CHECK-NEXT:    mov r4, r0
-; CHECK-NEXT:    vmov r0, s16
-; CHECK-NEXT:    bl __aeabi_f2ulz
-; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    vmov r0, s17
-; CHECK-NEXT:    mov r6, r1
-; CHECK-NEXT:    bl __aeabi_f2ulz
-; CHECK-NEXT:    vldr d8, [sp, #56]
-; CHECK-NEXT:    mov r7, r1
-; CHECK-NEXT:    vmov.32 d11[0], r0
-; CHECK-NEXT:    vmov.32 d10[0], r5
-; CHECK-NEXT:    vmov r1, s17
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    bl __aeabi_f2ulz
-; CHECK-NEXT:    mov r5, r1
-; CHECK-NEXT:    vmov r1, s16
-; CHECK-NEXT:    vmov.32 d11[1], r7
-; CHECK-NEXT:    vmov.32 d9[0], r0
-; CHECK-NEXT:    mov r0, r1
-; CHECK-NEXT:    vmov.32 d10[1], r6
-; CHECK-NEXT:    bl __aeabi_f2ulz
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r5, s1
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vmov r6, s1
+; CHECK-NEXT:    vmov r7, s0
+; CHECK-NEXT:    bl __fixunssfdi
 ; CHECK-NEXT:    vmov.32 d8[0], r0
-; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r8, r1
+; CHECK-NEXT:    bl __fixunssfdi
+; CHECK-NEXT:    vmov.32 d9[0], r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    bl __fixunssfdi
+; CHECK-NEXT:    vmov.32 d11[0], r0
+; CHECK-NEXT:    mov r0, r7
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    bl __fixunssfdi
+; CHECK-NEXT:    vmov.32 d10[0], r0
+; CHECK-NEXT:    vmov.32 d11[1], r6
+; CHECK-NEXT:    vmov.32 d10[1], r1
 ; CHECK-NEXT:    vmov.32 d9[1], r5
-; CHECK-NEXT:    vmov.32 d8[1], r1
+; CHECK-NEXT:    vmov.32 d8[1], r8
+; CHECK-NEXT:    vst1.64 {d10, d11}, [r4:128]!
 ; CHECK-NEXT:    vst1.64 {d8, d9}, [r4:128]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x i64> %val
 }
@@ -214,37 +209,36 @@ define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
 define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 ; CHECK-LABEL: sitofp_v4f32_v4i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #32
 ; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    movw r12, #0
-; CHECK-NEXT:    eor r3, r3, #-2147483648
-; CHECK-NEXT:    eor r2, r2, #-2147483648
 ; CHECK-NEXT:    eor r1, r1, #-2147483648
-; CHECK-NEXT:    eor r0, r0, #-2147483648
-; CHECK-NEXT:    vldr d16, .LCPI9_0
 ; CHECK-NEXT:    movt r12, #17200
-; CHECK-NEXT:    str r3, [sp, #24]
-; CHECK-NEXT:    str r12, [sp, #28]
-; CHECK-NEXT:    str r12, [sp, #20]
-; CHECK-NEXT:    str r2, [sp, #16]
-; CHECK-NEXT:    str r12, [sp, #12]
 ; CHECK-NEXT:    str r1, [sp, #8]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    eor r1, r2, #-2147483648
+; CHECK-NEXT:    str r12, [sp, #20]
+; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    vldr d17, [sp, #8]
+; CHECK-NEXT:    str r1, [sp, #16]
+; CHECK-NEXT:    eor r1, r3, #-2147483648
+; CHECK-NEXT:    str r12, [sp, #28]
+; CHECK-NEXT:    vldr d18, [sp, #16]
+; CHECK-NEXT:    str r1, [sp, #24]
 ; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    vldr d16, .LCPI9_0
+; CHECK-NEXT:    vldr d19, [sp, #24]
 ; CHECK-NEXT:    str r0, [sp]
-; CHECK-NEXT:    vldr d17, [sp, #24]
-; CHECK-NEXT:    vldr d18, [sp, #16]
-; CHECK-NEXT:    vldr d19, [sp, #8]
-; CHECK-NEXT:    vldr d20, [sp]
-; CHECK-NEXT:    vsub.f64 d17, d17, d16
 ; CHECK-NEXT:    vsub.f64 d18, d18, d16
 ; CHECK-NEXT:    vsub.f64 d19, d19, d16
+; CHECK-NEXT:    vldr d20, [sp]
+; CHECK-NEXT:    vsub.f64 d17, d17, d16
 ; CHECK-NEXT:    vsub.f64 d16, d20, d16
-; CHECK-NEXT:    vcvt.f32.f64 s3, d17
+; CHECK-NEXT:    vcvt.f32.f64 s3, d19
 ; CHECK-NEXT:    vcvt.f32.f64 s2, d18
-; CHECK-NEXT:    vcvt.f32.f64 s1, d19
+; CHECK-NEXT:    vcvt.f32.f64 s1, d17
 ; CHECK-NEXT:    vcvt.f32.f64 s0, d16
-; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
@@ -259,32 +253,32 @@ define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 ; CHECK-LABEL: uitofp_v4f32_v4i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #32
 ; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    movw r12, #0
-; CHECK-NEXT:    str r3, [sp, #24]
-; CHECK-NEXT:    vldr d16, .LCPI10_0
+; CHECK-NEXT:    str r1, [sp, #8]
 ; CHECK-NEXT:    movt r12, #17200
-; CHECK-NEXT:    str r12, [sp, #28]
+; CHECK-NEXT:    vldr d16, .LCPI10_0
+; CHECK-NEXT:    str r12, [sp, #12]
 ; CHECK-NEXT:    str r12, [sp, #20]
+; CHECK-NEXT:    vldr d17, [sp, #8]
 ; CHECK-NEXT:    str r2, [sp, #16]
-; CHECK-NEXT:    str r12, [sp, #12]
-; CHECK-NEXT:    str r1, [sp, #8]
-; CHECK-NEXT:    stm sp, {r0, r12}
-; CHECK-NEXT:    vldr d17, [sp, #24]
-; CHECK-NEXT:    vldr d18, [sp, #16]
-; CHECK-NEXT:    vldr d19, [sp, #8]
-; CHECK-NEXT:    vldr d20, [sp]
+; CHECK-NEXT:    str r12, [sp, #28]
 ; CHECK-NEXT:    vsub.f64 d17, d17, d16
+; CHECK-NEXT:    vldr d18, [sp, #16]
+; CHECK-NEXT:    str r3, [sp, #24]
+; CHECK-NEXT:    str r12, [sp, #4]
 ; CHECK-NEXT:    vsub.f64 d18, d18, d16
+; CHECK-NEXT:    vldr d19, [sp, #24]
+; CHECK-NEXT:    str r0, [sp]
 ; CHECK-NEXT:    vsub.f64 d19, d19, d16
+; CHECK-NEXT:    vldr d20, [sp]
+; CHECK-NEXT:    vcvt.f32.f64 s3, d19
 ; CHECK-NEXT:    vsub.f64 d16, d20, d16
-; CHECK-NEXT:    vcvt.f32.f64 s3, d17
 ; CHECK-NEXT:    vcvt.f32.f64 s2, d18
-; CHECK-NEXT:    vcvt.f32.f64 s1, d19
+; CHECK-NEXT:    vcvt.f32.f64 s1, d17
 ; CHECK-NEXT:    vcvt.f32.f64 s0, d16
-; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
@@ -299,31 +293,29 @@ define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
 define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
 ; CHECK-LABEL: sitofp_v4f32_v4i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    mov r5, r3
 ; CHECK-NEXT:    mov r6, r2
-; CHECK-NEXT:    bl __aeabi_l2f
-; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    bl __floatdisf
+; CHECK-NEXT:    mov r8, r0
 ; CHECK-NEXT:    mov r0, r6
 ; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    bl __floatdisf
 ; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    add r0, sp, #32
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    vmov r0, r1, d9
-; CHECK-NEXT:    bl __aeabi_l2f
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    vmov r0, r1, d8
-; CHECK-NEXT:    bl __aeabi_l2f
+; CHECK-NEXT:    add r0, sp, #24
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov r0, r1, d17
+; CHECK-NEXT:    vmov r6, r7, d16
+; CHECK-NEXT:    bl __floatdisf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __floatdisf
 ; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r0, r8
 ; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    mov r3, r6
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -331,31 +323,29 @@ define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
 define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
 ; CHECK-LABEL: uitofp_v4f32_v4i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
 ; CHECK-NEXT:    mov r5, r3
 ; CHECK-NEXT:    mov r6, r2
-; CHECK-NEXT:    bl __aeabi_ul2f
-; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    bl __floatundisf
+; CHECK-NEXT:    mov r8, r0
 ; CHECK-NEXT:    mov r0, r6
 ; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    bl __floatundisf
 ; CHECK-NEXT:    mov r5, r0
-; CHECK-NEXT:    add r0, sp, #32
-; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
-; CHECK-NEXT:    vmov r0, r1, d9
-; CHECK-NEXT:    bl __aeabi_ul2f
-; CHECK-NEXT:    mov r6, r0
-; CHECK-NEXT:    vmov r0, r1, d8
-; CHECK-NEXT:    bl __aeabi_ul2f
+; CHECK-NEXT:    add r0, sp, #24
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov r0, r1, d17
+; CHECK-NEXT:    vmov r6, r7, d16
+; CHECK-NEXT:    bl __floatundisf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl __floatundisf
 ; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r0, r8
 ; CHECK-NEXT:    mov r1, r5
-; CHECK-NEXT:    mov r3, r6
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -369,8 +359,8 @@ define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
 ; CHECK-NEXT:    vsqrt.f32 s6, s2
 ; CHECK-NEXT:    vsqrt.f32 s5, s1
 ; CHECK-NEXT:    vsqrt.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
 ; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    vmov r2, r3, d3
 ; CHECK-NEXT:    bx lr
   %val = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
@@ -379,15 +369,25 @@ define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
 define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: rint_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintx.f32 s7, s3
-; CHECK-NEXT:    vrintx.f32 s6, s2
-; CHECK-NEXT:    vrintx.f32 s5, s1
-; CHECK-NEXT:    vrintx.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl rintf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -395,15 +395,25 @@ define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
 define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: nearbyint_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintr.f32 s7, s3
-; CHECK-NEXT:    vrintr.f32 s6, s2
-; CHECK-NEXT:    vrintr.f32 s5, s1
-; CHECK-NEXT:    vrintr.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -411,17 +421,33 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
 define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: maxnum_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vmaxnm.f32 s11, s3, s7
-; CHECK-NEXT:    vmaxnm.f32 s10, s2, s6
-; CHECK-NEXT:    vmaxnm.f32 s9, s1, s5
-; CHECK-NEXT:    vmaxnm.f32 s8, s0, s4
-; CHECK-NEXT:    vmov r2, r3, d5
-; CHECK-NEXT:    vmov r0, r1, d4
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    vmov r1, r7, d9
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    vmov r7, r1, d8
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmaxf
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -429,17 +455,33 @@ define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: minnum_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vminnm.f32 s11, s3, s7
-; CHECK-NEXT:    vminnm.f32 s10, s2, s6
-; CHECK-NEXT:    vminnm.f32 s9, s1, s5
-; CHECK-NEXT:    vminnm.f32 s8, s0, s4
-; CHECK-NEXT:    vmov r2, r3, d5
-; CHECK-NEXT:    vmov r0, r1, d4
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    vmov r1, r7, d9
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r8, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    vmov r7, r1, d8
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fminf
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    mov r2, r8
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, pc}
   %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -447,15 +489,25 @@ define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: ceil_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintp.f32 s7, s3
-; CHECK-NEXT:    vrintp.f32 s6, s2
-; CHECK-NEXT:    vrintp.f32 s5, s1
-; CHECK-NEXT:    vrintp.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl ceilf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -463,15 +515,25 @@ define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
 define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: floor_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintm.f32 s7, s3
-; CHECK-NEXT:    vrintm.f32 s6, s2
-; CHECK-NEXT:    vrintm.f32 s5, s1
-; CHECK-NEXT:    vrintm.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl floorf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -479,15 +541,25 @@ define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
 define <4 x float> @round_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: round_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrinta.f32 s7, s3
-; CHECK-NEXT:    vrinta.f32 s6, s2
-; CHECK-NEXT:    vrinta.f32 s5, s1
-; CHECK-NEXT:    vrinta.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl roundf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -495,15 +567,25 @@ define <4 x float> @round_v4f32(<4 x float> %x) #0 {
 define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: roundeven_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintn.f32 s7, s3
-; CHECK-NEXT:    vrintn.f32 s6, s2
-; CHECK-NEXT:    vrintn.f32 s5, s1
-; CHECK-NEXT:    vrintn.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl roundevenf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -511,15 +593,25 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
 define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
 ; CHECK-LABEL: trunc_v4f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vrintz.f32 s7, s3
-; CHECK-NEXT:    vrintz.f32 s6, s2
-; CHECK-NEXT:    vrintz.f32 s5, s1
-; CHECK-NEXT:    vrintz.f32 s4, s0
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r7, r0
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r6
+; CHECK-NEXT:    bl truncf
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    mov r2, r5
+; CHECK-NEXT:    mov r3, r4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
   ret <4 x float> %val
 }
@@ -528,36 +620,36 @@ define <4 x i1> @fcmp_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: fcmp_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
 ; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    mov r2, #0
-; CHECK-NEXT:    mov r3, #0
-; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
 ; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    mov r1, #0
-; CHECK-NEXT:    vcmp.f32 s1, s5
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT:    vcmp.f32 s3, s7
-; CHECK-NEXT:    movweq r0, #1
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT:    vcmp.f32 s0, s4
 ; CHECK-NEXT:    movweq r2, #1
 ; CHECK-NEXT:    cmp r2, #0
 ; CHECK-NEXT:    mvnne r2, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    vcmp.f32 s1, s5
+; CHECK-NEXT:    vmov.32 d17[0], r2
 ; CHECK-NEXT:    movweq r3, #1
 ; CHECK-NEXT:    cmp r3, #0
 ; CHECK-NEXT:    mvnne r3, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movweq r1, #1
-; CHECK-NEXT:    cmp r1, #0
-; CHECK-NEXT:    mvnne r1, #0
-; CHECK-NEXT:    vmov.32 d17[0], r1
 ; CHECK-NEXT:    vmov.32 d16[0], r3
-; CHECK-NEXT:    vmov.32 d17[1], r2
+; CHECK-NEXT:    vmov.32 d17[1], r1
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
 ; CHECK-NEXT:    vmov.32 d16[1], r0
 ; CHECK-NEXT:    vmovn.i32 d16, q8
 ; CHECK-NEXT:    vmov r0, r1, d16
@@ -571,36 +663,36 @@ define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: fcmps_v4f32:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
 ; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    mov r2, #0
-; CHECK-NEXT:    mov r3, #0
-; CHECK-NEXT:    vld1.64 {d2, d3}, [r12]
 ; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    mov r1, #0
-; CHECK-NEXT:    vcmpe.f32 s1, s5
-; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT:    vcmpe.f32 s3, s7
-; CHECK-NEXT:    movweq r0, #1
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmpe.f32 s2, s6
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT:    vcmpe.f32 s0, s4
 ; CHECK-NEXT:    movweq r2, #1
 ; CHECK-NEXT:    cmp r2, #0
 ; CHECK-NEXT:    mvnne r2, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f32 s2, s6
+; CHECK-NEXT:    vcmpe.f32 s1, s5
+; CHECK-NEXT:    vmov.32 d17[0], r2
 ; CHECK-NEXT:    movweq r3, #1
 ; CHECK-NEXT:    cmp r3, #0
 ; CHECK-NEXT:    mvnne r3, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movweq r1, #1
-; CHECK-NEXT:    cmp r1, #0
-; CHECK-NEXT:    mvnne r1, #0
-; CHECK-NEXT:    vmov.32 d17[0], r1
 ; CHECK-NEXT:    vmov.32 d16[0], r3
-; CHECK-NEXT:    vmov.32 d17[1], r2
+; CHECK-NEXT:    vmov.32 d17[1], r1
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
 ; CHECK-NEXT:    vmov.32 d16[1], r0
 ; CHECK-NEXT:    vmovn.i32 d16, q8
 ; CHECK-NEXT:    vmov r0, r1, d16
@@ -616,9 +708,9 @@ define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: add_v2f64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vmov d18, r0, r1
 ; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vadd.f64 d18, d18, d16
 ; CHECK-NEXT:    vadd.f64 d16, d19, d17
 ; CHECK-NEXT:    vmov r0, r1, d18
@@ -632,9 +724,9 @@ define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: sub_v2f64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vmov d18, r0, r1
 ; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vsub.f64 d18, d18, d16
 ; CHECK-NEXT:    vsub.f64 d16, d19, d17
 ; CHECK-NEXT:    vmov r0, r1, d18
@@ -648,9 +740,9 @@ define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: mul_v2f64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vmov d18, r0, r1
 ; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vmul.f64 d18, d18, d16
 ; CHECK-NEXT:    vmul.f64 d16, d19, d17
 ; CHECK-NEXT:    vmov r0, r1, d18
@@ -664,9 +756,9 @@ define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: div_v2f64:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vmov d18, r0, r1
 ; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT:    vdiv.f64 d18, d18, d16
 ; CHECK-NEXT:    vdiv.f64 d16, d19, d17
 ; CHECK-NEXT:    vmov r0, r1, d18
@@ -679,19 +771,17 @@ define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
 ; CHECK-LABEL: fma_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    add r12, sp, #24
-; CHECK-NEXT:    add lr, sp, #8
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    add r12, sp, #16
 ; CHECK-NEXT:    vmov d20, r0, r1
-; CHECK-NEXT:    vmov d21, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [lr]
 ; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vmov d21, r2, r3
 ; CHECK-NEXT:    vfma.f64 d18, d20, d16
 ; CHECK-NEXT:    vfma.f64 d19, d21, d17
 ; CHECK-NEXT:    vmov r0, r1, d18
 ; CHECK-NEXT:    vmov r2, r3, d19
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    bx lr
   %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -727,18 +817,17 @@ define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
 define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: fptosi_v2i64_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    mov r4, r1
 ; CHECK-NEXT:    mov r5, r0
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    bl __fixdfdi
 ; CHECK-NEXT:    mov r6, r0
 ; CHECK-NEXT:    mov r7, r1
 ; CHECK-NEXT:    mov r0, r5
 ; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    bl __fixdfdi
 ; CHECK-NEXT:    mov r2, r6
 ; CHECK-NEXT:    mov r3, r7
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
@@ -749,18 +838,17 @@ define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
 define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: fptoui_v2i64_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    mov r4, r1
 ; CHECK-NEXT:    mov r5, r0
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    bl __fixunsdfdi
 ; CHECK-NEXT:    mov r6, r0
 ; CHECK-NEXT:    mov r7, r1
 ; CHECK-NEXT:    mov r0, r5
 ; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    bl __fixunsdfdi
 ; CHECK-NEXT:    mov r2, r6
 ; CHECK-NEXT:    mov r3, r7
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
@@ -771,21 +859,20 @@ define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
 define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
 ; CHECK-LABEL: sitofp_v2f64_v2i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #16
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    vmov d16, r0, r1
 ; CHECK-NEXT:    movw r2, #0
-; CHECK-NEXT:    vmov.32 r0, d16[0]
 ; CHECK-NEXT:    movt r2, #17200
-; CHECK-NEXT:    eor r0, r0, #-2147483648
+; CHECK-NEXT:    vmov.32 r0, d16[0]
 ; CHECK-NEXT:    str r2, [sp, #12]
-; CHECK-NEXT:    str r0, [sp, #8]
-; CHECK-NEXT:    vmov.32 r0, d16[1]
-; CHECK-NEXT:    str r2, [sp, #4]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
 ; CHECK-NEXT:    vldr d16, .LCPI34_0
 ; CHECK-NEXT:    eor r0, r0, #-2147483648
-; CHECK-NEXT:    str r0, [sp]
+; CHECK-NEXT:    str r0, [sp, #8]
+; CHECK-NEXT:    str r2, [sp, #4]
+; CHECK-NEXT:    eor r0, r1, #-2147483648
 ; CHECK-NEXT:    vldr d17, [sp, #8]
+; CHECK-NEXT:    str r0, [sp]
 ; CHECK-NEXT:    vldr d18, [sp]
 ; CHECK-NEXT:    vsub.f64 d17, d17, d16
 ; CHECK-NEXT:    vsub.f64 d16, d18, d16
@@ -805,23 +892,22 @@ define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
 define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
 ; CHECK-LABEL: uitofp_v2f64_v2i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #16
 ; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    add r2, sp, #8
+; CHECK-NEXT:    movw r2, #0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    add r0, sp, #8
+; CHECK-NEXT:    movt r2, #17200
+; CHECK-NEXT:    str r2, [sp, #12]
+; CHECK-NEXT:    vst1.32 {d17[0]}, [r0:32]
 ; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vst1.32 {d16[0]}, [r2:32]
-; CHECK-NEXT:    vst1.32 {d16[1]}, [r0:32]
-; CHECK-NEXT:    movw r0, #0
+; CHECK-NEXT:    vldr d18, [sp, #8]
+; CHECK-NEXT:    vst1.32 {d17[1]}, [r0:32]
+; CHECK-NEXT:    str r2, [sp, #4]
 ; CHECK-NEXT:    vldr d16, .LCPI35_0
-; CHECK-NEXT:    movt r0, #17200
-; CHECK-NEXT:    str r0, [sp, #12]
-; CHECK-NEXT:    str r0, [sp, #4]
-; CHECK-NEXT:    vldr d17, [sp, #8]
-; CHECK-NEXT:    vldr d18, [sp]
-; CHECK-NEXT:    vsub.f64 d17, d17, d16
-; CHECK-NEXT:    vsub.f64 d16, d18, d16
-; CHECK-NEXT:    vmov r0, r1, d17
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vsub.f64 d18, d18, d16
+; CHECK-NEXT:    vsub.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
 ; CHECK-NEXT:    vmov r2, r3, d16
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    bx lr
@@ -837,18 +923,17 @@ define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
 define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
 ; CHECK-LABEL: sitofp_v2f64_v2i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    mov r4, r1
 ; CHECK-NEXT:    mov r5, r0
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    bl __floatdidf
 ; CHECK-NEXT:    mov r6, r0
 ; CHECK-NEXT:    mov r7, r1
 ; CHECK-NEXT:    mov r0, r5
 ; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    bl __floatdidf
 ; CHECK-NEXT:    mov r2, r6
 ; CHECK-NEXT:    mov r3, r7
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
@@ -859,18 +944,17 @@ define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
 define <2 x double> @uitofp_v2f64_v2i64(<2 x i64> %x) #0 {
 ; CHECK-LABEL: uitofp_v2f64_v2i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
 ; CHECK-NEXT:    mov r4, r1
 ; CHECK-NEXT:    mov r5, r0
 ; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    bl __floatundidf
 ; CHECK-NEXT:    mov r6, r0
 ; CHECK-NEXT:    mov r7, r1
 ; CHECK-NEXT:    mov r0, r5
 ; CHECK-NEXT:    mov r1, r4
-; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    bl __floatundidf
 ; CHECK-NEXT:    mov r2, r6
 ; CHECK-NEXT:    mov r3, r7
 ; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
@@ -895,13 +979,20 @@ define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
 define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: rint_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintx.f64 d16, d16
-; CHECK-NEXT:    vrintx.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -909,13 +1000,20 @@ define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
 define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: nearbyint_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintr.f64 d16, d16
-; CHECK-NEXT:    vrintr.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -923,15 +1021,28 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
 define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: maxnum_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d18, r0, r1
-; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT:    vmaxnm.f64 d18, d18, d16
-; CHECK-NEXT:    vmaxnm.f64 d16, d19, d17
-; CHECK-NEXT:    vmov r0, r1, d18
-; CHECK-NEXT:    vmov r2, r3, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r4, r2
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r2, r3, d9
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    vmov r2, r3, d8
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -939,15 +1050,28 @@ define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: minnum_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d18, r0, r1
-; CHECK-NEXT:    vmov d19, r2, r3
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT:    vminnm.f64 d18, d18, d16
-; CHECK-NEXT:    vminnm.f64 d16, d19, d17
-; CHECK-NEXT:    vmov r0, r1, d18
-; CHECK-NEXT:    vmov r2, r3, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    add r0, sp, #40
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0]
+; CHECK-NEXT:    mov r4, r2
+; CHECK-NEXT:    mov r6, r3
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vmov r2, r3, d9
+; CHECK-NEXT:    mov r0, r4
+; CHECK-NEXT:    mov r1, r6
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    vmov r2, r3, d8
+; CHECK-NEXT:    mov r4, r0
+; CHECK-NEXT:    mov r6, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r7
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -955,13 +1079,20 @@ define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: ceil_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintp.f64 d16, d16
-; CHECK-NEXT:    vrintp.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -969,13 +1100,20 @@ define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
 define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: floor_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintm.f64 d16, d16
-; CHECK-NEXT:    vrintm.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -983,13 +1121,20 @@ define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
 define <2 x double> @round_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: round_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrinta.f64 d16, d16
-; CHECK-NEXT:    vrinta.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -997,13 +1142,20 @@ define <2 x double> @round_v2f64(<2 x double> %x) #0 {
 define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: roundeven_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintn.f64 d16, d16
-; CHECK-NEXT:    vrintn.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1011,13 +1163,20 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
 define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: trunc_v2f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vrintz.f64 d16, d16
-; CHECK-NEXT:    vrintz.f64 d17, d17
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov r4, r1
+; CHECK-NEXT:    mov r5, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    mov r6, r0
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    mov r0, r5
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    mov r2, r6
+; CHECK-NEXT:    mov r3, r7
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, pc}
   %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
   ret <2 x double> %val
 }
@@ -1026,23 +1185,21 @@ define <2 x i1> @fcmp_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: fcmp_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d18, r2, r3
-; CHECK-NEXT:    mov r3, #0
-; CHECK-NEXT:    mov r2, #0
 ; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT:    vcmp.f64 d18, d17
-; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    vcmp.f64 d19, d16
+; CHECK-NEXT:    mov r1, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmp.f64 d18, d16
-; CHECK-NEXT:    movweq r3, #1
-; CHECK-NEXT:    cmp r3, #0
-; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vcmp.f64 d18, d17
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movweq r2, #1
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    cmp r2, #0
-; CHECK-NEXT:    mvnne r2, #0
-; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
 ; CHECK-NEXT:    bx lr
 entry:
   %val = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
@@ -1053,23 +1210,21 @@ define <2 x i1> @fcmps_v2f64(<2 x double> %x, <2 x double> %y) #0 {
 ; CHECK-LABEL: fcmps_v2f64:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    mov r12, sp
-; CHECK-NEXT:    vmov d18, r2, r3
-; CHECK-NEXT:    mov r3, #0
-; CHECK-NEXT:    mov r2, #0
 ; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT:    vcmpe.f64 d18, d17
-; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    vcmpe.f64 d19, d16
+; CHECK-NEXT:    mov r1, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    vcmpe.f64 d18, d16
-; CHECK-NEXT:    movweq r3, #1
-; CHECK-NEXT:    cmp r3, #0
-; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vcmpe.f64 d18, d17
+; CHECK-NEXT:    movweq r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
-; CHECK-NEXT:    movweq r2, #1
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    cmp r2, #0
-; CHECK-NEXT:    mvnne r2, #0
-; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    movweq r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
 ; CHECK-NEXT:    bx lr
 entry:
   %val = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
@@ -1129,11 +1284,11 @@ define <1 x double> @div_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z) #0 {
 ; CHECK-LABEL: fma_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d18, r0, r1
-; CHECK-NEXT:    vfma.f64 d16, d18, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vldr d18, [sp]
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vfma.f64 d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
 ; CHECK-NEXT:    bx lr
   %val = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
@@ -1164,9 +1319,8 @@ define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
 define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptosi_v1i64_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    bl __fixdfdi
 ; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i64> %val
@@ -1175,9 +1329,8 @@ define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
 define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: fptoui_v1i64_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    bl __aeabi_d2ulz
+; CHECK-NEXT:    bl __fixunsdfdi
 ; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x i64> %val
@@ -1186,14 +1339,13 @@ define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
 define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
 ; CHECK-LABEL: sitofp_v1f64_v1i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #8
 ; CHECK-NEXT:    sub sp, sp, #8
 ; CHECK-NEXT:    movw r1, #0
 ; CHECK-NEXT:    eor r0, r0, #-2147483648
-; CHECK-NEXT:    vldr d16, .LCPI59_0
 ; CHECK-NEXT:    movt r1, #17200
 ; CHECK-NEXT:    str r0, [sp]
 ; CHECK-NEXT:    str r1, [sp, #4]
+; CHECK-NEXT:    vldr d16, .LCPI59_0
 ; CHECK-NEXT:    vldr d17, [sp]
 ; CHECK-NEXT:    vsub.f64 d16, d17, d16
 ; CHECK-NEXT:    vmov r0, r1, d16
@@ -1211,12 +1363,12 @@ define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
 define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
 ; CHECK-LABEL: uitofp_v1f64_v1i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .pad #8
 ; CHECK-NEXT:    sub sp, sp, #8
 ; CHECK-NEXT:    movw r1, #0
-; CHECK-NEXT:    vldr d16, .LCPI60_0
+; CHECK-NEXT:    str r0, [sp]
 ; CHECK-NEXT:    movt r1, #17200
-; CHECK-NEXT:    stm sp, {r0, r1}
+; CHECK-NEXT:    vldr d16, .LCPI60_0
+; CHECK-NEXT:    str r1, [sp, #4]
 ; CHECK-NEXT:    vldr d17, [sp]
 ; CHECK-NEXT:    vsub.f64 d16, d17, d16
 ; CHECK-NEXT:    vmov r0, r1, d16
@@ -1234,12 +1386,11 @@ define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
 define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
 ; CHECK-LABEL: sitofp_v1f64_v1i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
 ; CHECK-NEXT:    vmov d16, r0, r1
 ; CHECK-NEXT:    vmov.32 r0, d16[0]
 ; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __aeabi_l2d
+; CHECK-NEXT:    bl __floatdidf
 ; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
@@ -1248,12 +1399,11 @@ define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
 define <1 x double> @uitofp_v1f64_v1i64(<1 x i64> %x) #0 {
 ; CHECK-LABEL: uitofp_v1f64_v1i64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
 ; CHECK-NEXT:    vmov d16, r0, r1
 ; CHECK-NEXT:    vmov.32 r0, d16[0]
 ; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __aeabi_ul2d
+; CHECK-NEXT:    bl __floatundidf
 ; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
@@ -1273,10 +1423,9 @@ define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
 define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: rint_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintx.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl rint
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1284,10 +1433,9 @@ define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
 define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: nearbyint_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintr.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl nearbyint
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1295,11 +1443,9 @@ define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
 define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: maxnum_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vmaxnm.f64 d16, d17, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl fmax
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1307,11 +1453,9 @@ define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: minnum_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vminnm.f64 d16, d17, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl fmin
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1319,10 +1463,9 @@ define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
 define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: ceil_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintp.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl ceil
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1330,10 +1473,9 @@ define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
 define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: floor_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintm.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl floor
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1341,10 +1483,9 @@ define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
 define <1 x double> @round_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: round_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrinta.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl round
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1352,10 +1493,9 @@ define <1 x double> @round_v1f64(<1 x double> %x) #0 {
 define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: roundeven_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintn.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl roundeven
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1363,10 +1503,9 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
 define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
 ; CHECK-LABEL: trunc_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vrintz.f64 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl trunc
+; CHECK-NEXT:    pop {r11, pc}
   %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
   ret <1 x double> %val
 }
@@ -1374,8 +1513,8 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
 define <1 x i1> @fcmp_v1f61(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: fcmp_v1f61:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r0, r1
 ; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
 ; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    vcmp.f64 d17, d16
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
@@ -1389,8 +1528,8 @@ entry:
 define <1 x i1> @fcmps_v1f61(<1 x double> %x, <1 x double> %y) #0 {
 ; CHECK-LABEL: fcmps_v1f61:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r0, r1
 ; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov d17, r0, r1
 ; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    vcmpe.f64 d17, d16
 ; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
@@ -1429,87 +1568,4 @@ define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
   ret <2 x double> %val
 }
 
-
 attributes #0 = { strictfp }
-
-declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
-declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
-declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
-declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
-declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-
-declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
-declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
-declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
-declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
-declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
-declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-
-declare <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double>, <1 x double>, <1 x double>, metadata, metadata)
-declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
-declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
-declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
-declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double>, metadata, metadata)
-declare <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double>, <1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double>, <1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
-declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-
-declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
-

>From ac41dbfd6440a47dcc2b214bcb4b6d7f219a8c24 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Fri, 28 Nov 2025 03:40:22 +0300
Subject: [PATCH 5/5] fixup

---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 5 +++--
 llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 7628550fad879..02a5dd54eeace 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -614,9 +614,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
       for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                       ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT})
         setOperationAction(Op, MVT::f64, Legal);
-      
+
       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
-      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
     }
   }
 
@@ -957,6 +956,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
       setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
     }
+  } else {
+    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
   }
 
   if (!Subtarget->hasFP16()) {
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
index dc52ad51c0241..6d47240f06f80 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics-vector.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=armv7a-- -mattr=+neon,+vfp4 %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7a-none-eabihf -mattr=+neon,+vfp4 %s -o - | FileCheck %s
 
 define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-LABEL: add_v4f32:
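
For illustration only (not part of the patch series): a minimal strictfp scalar sketch of the extend that the STRICT_FP_EXTEND change above targets. The function name is invented for the example; on a hard-float ARM target with FP64 this should now select a single vcvt.f64.f32 rather than going through a libcall.

define double @fpext_f64_f32(float %x) #0 {
  ; Widening f32 to f64 is exact, so the constrained fpext carries only an
  ; exception-behavior operand and no rounding-mode metadata.
  %val = call double @llvm.experimental.constrained.fpext.f64.f32(float %x, metadata !"fpexcept.strict") #0
  ret double %val
}

attributes #0 = { strictfp }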


