[llvm] 02746f8 - [ARM] Remove -enable-unsafe-fp-math from a number of tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 26 03:48:39 PDT 2025


Author: David Green
Date: 2025-09-26T11:48:34+01:00
New Revision: 02746f80c1fa1d17997849a9bf5a41a91523ca41

URL: https://github.com/llvm/llvm-project/commit/02746f80c1fa1d17997849a9bf5a41a91523ca41
DIFF: https://github.com/llvm/llvm-project/commit/02746f80c1fa1d17997849a9bf5a41a91523ca41.diff

LOG: [ARM] Remove -enable-unsafe-fp-math from a number of tests. NFC

The llvm.convert.to.fp16 and llvm.convert.from.fp16 intrinsics are deprecated and no
longer used, so the fp16.ll test that exercised them is removed. The remaining tests no
longer pass -enable-unsafe-fp-math, relying on per-instruction fast-math flags instead
(with separate fast variants added where needed).
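
For context, a minimal sketch (not part of the commit; the function name is illustrative)
of how current IR expresses what the deprecated intrinsics and the global flag used to
cover: half-precision values use the half type with fpext/fptrunc rather than the i16-based
llvm.convert.from.fp16/llvm.convert.to.fp16 intrinsics, and relaxed FP semantics are
requested with per-instruction fast-math flags rather than the module-wide
-enable-unsafe-fp-math llc option.

define half @add_halves(half %a, half %b) {
entry:
  %x = fpext half %a to float         ; widen: replaces llvm.convert.from.fp16
  %y = fpext half %b to float
  %s = fadd fast float %x, %y         ; per-instruction fast-math flag
  %r = fptrunc float %s to half       ; narrow: replaces llvm.convert.to.fp16
  ret half %r
}

Functions written this way can be checked directly with llc and FileCheck, as the updated
tests in this patch do.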

Added: 
    

Modified: 
    llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
    llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
    llvm/test/CodeGen/ARM/fnmul.ll
    llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
    llvm/test/CodeGen/ARM/sincos.ll
    llvm/test/CodeGen/ARM/vminmaxnm.ll

Removed: 
    llvm/test/CodeGen/ARM/fp16.ll


################################################################################
diff --git a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
index 484ad93bebeab..0e8d47347286b 100644
--- a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
+++ b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 -enable-unsafe-fp-math < %s
+; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
 ; PR5367
 
 define arm_aapcs_vfpcc void @_Z27Benchmark_SceDualQuaternionPvm(ptr nocapture %pBuffer, i32 %numItems) nounwind {

diff --git a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
index 80c1968c85743..593fb9348506b 100644
--- a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
+++ b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -enable-unsafe-fp-math %s -o /dev/null
+; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o /dev/null
 ;target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
 ;target triple = "armv7-none-linux-gnueabi"
 

diff --git a/llvm/test/CodeGen/ARM/fnmul.ll b/llvm/test/CodeGen/ARM/fnmul.ll
index b021de8b7ad00..655c9f8415402 100644
--- a/llvm/test/CodeGen/ARM/fnmul.ll
+++ b/llvm/test/CodeGen/ARM/fnmul.ll
@@ -1,15 +1,30 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o -                        | FileCheck %s -check-prefix STRICT
-
-; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 -enable-unsafe-fp-math %s -o - | FileCheck %s -check-prefix UNSAFE
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s
 
 define double @t1(double %a, double %b) {
-; STRICT:    vnmul.f64
-;
-; UNSAFE:    vnmul.f64
+; CHECK-LABEL: t1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vmov d1, r0, r1
+; CHECK-NEXT:    vnmul.f64 d0, d1, d0
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    bx lr
 entry:
-        %tmp2 = fsub double -0.000000e+00, %a            ; <double> [#uses=1]
-        %tmp4 = fmul double %tmp2, %b            ; <double> [#uses=1]
-        ret double %tmp4
+  %tmp2 = fsub double -0.000000e+00, %a
+  %tmp4 = fmul double %tmp2, %b
+  ret double %tmp4
 }
 
-
+define double @tfast(double %a, double %b) {
+; CHECK-LABEL: tfast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vmov d1, r0, r1
+; CHECK-NEXT:    vnmul.f64 d0, d1, d0
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %tmp2 = fsub fast double -0.000000e+00, %a
+  %tmp4 = fmul fast double %tmp2, %b
+  ret double %tmp4
+}

diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
index 33ff71e8c473e..9d0ea0e2d37cf 100644
--- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -mtriple=arm-eabi -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
-; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7a-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s
 
 ; TODO: we can't pass half-precision arguments as "half" types yet. We do
 ; that for the time being by passing "float %f.coerce" and the necessary
@@ -9,9 +10,11 @@
 
 define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_o:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -22,9 +25,11 @@ entry:
 
 define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_o_rev:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -35,9 +40,11 @@ entry:
 
 define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_u:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -48,9 +55,11 @@ entry:
 
 define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_ule:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -61,9 +70,11 @@ entry:
 
 define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_u_rev:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -74,9 +85,11 @@ entry:
 
 define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_o:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -87,9 +100,11 @@ entry:
 
 define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_oge:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -100,9 +115,11 @@ entry:
 
 define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_o_rev:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -113,9 +130,11 @@ entry:
 
 define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_ole_rev:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -126,9 +145,11 @@ entry:
 
 define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_u:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -139,9 +160,11 @@ entry:
 
 define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_uge:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -152,9 +175,11 @@ entry:
 
 define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_u_rev:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r1
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
 entry:
   %0 = bitcast i16 %a to half
   %1 = bitcast i16 %b to half
@@ -167,11 +192,17 @@ entry:
 
 define half @fp16_vminnm_NNNo(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminnm_NNNo:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], #1.200000e+01
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmov.f16 s2, #1.200000e+01
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI12_0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI12_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast olt half %0, 12.
@@ -183,11 +214,19 @@ entry:
 
 define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminnm_NNNo_rev:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI13_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI13_1
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI13_0:
+; CHECK-NEXT:    .short 0x5300 @ half 56
+; CHECK-NEXT:  .LCPI13_1:
+; CHECK-NEXT:    .short 0x54e0 @ half 78
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast ogt half %0, 56.
@@ -199,11 +238,17 @@ entry:
 
 define half @fp16_vminnm_NNNu(i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_NNNu:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], #1.200000e+01
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmov.f16 s2, #1.200000e+01
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI14_0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI14_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast ult half 12., %0
@@ -215,11 +260,19 @@ entry:
 
 define half @fp16_vminnm_NNNule(i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_NNNule:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI15_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI15_1
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI15_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
+; CHECK-NEXT:  .LCPI15_1:
+; CHECK-NEXT:    .short 0x5300 @ half 56
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast ule half 34., %0
@@ -231,11 +284,19 @@ entry:
 
 define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
 ; CHECK-LABEL: fp16_vminnm_NNNu_rev:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI16_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI16_1
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .short 0x5300 @ half 56
+; CHECK-NEXT:  .LCPI16_1:
+; CHECK-NEXT:    .short 0x54e0 @ half 78
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast ugt half 56., %0
@@ -247,11 +308,17 @@ entry:
 
 define half @fp16_vmaxnm_NNNo(i16 signext %a) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNo:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], #1.200000e+01
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmov.f16 s2, #1.200000e+01
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI17_0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI17_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast ogt half %0, 12.
@@ -263,11 +330,19 @@ entry:
 
 define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNoge:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI18_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI18_1
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
+; CHECK-NEXT:  .LCPI18_1:
+; CHECK-NEXT:    .short 0x5300 @ half 56
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast oge half %0, 34.
@@ -279,11 +354,19 @@ entry:
 
 define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNo_rev:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI19_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI19_1
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .short 0x5300 @ half 56
+; CHECK-NEXT:  .LCPI19_1:
+; CHECK-NEXT:    .short 0x54e0 @ half 78
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast olt half %0, 56.
@@ -295,11 +378,19 @@ entry:
 
 define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNole_rev:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI20_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI20_1
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .short 0x54e0 @ half 78
+; CHECK-NEXT:  .LCPI20_1:
+; CHECK-NEXT:    .short 0x55a0 @ half 90
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast ole half %0, 78.
@@ -311,11 +402,17 @@ entry:
 
 define half @fp16_vmaxnm_NNNu(i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNu:
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], #1.200000e+01
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmov.f16 s2, #1.200000e+01
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI21_0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast ugt half 12., %0
@@ -327,11 +424,19 @@ entry:
 
 define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNuge:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI22_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI22_1
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .short 0x5040 @ half 34
+; CHECK-NEXT:  .LCPI22_1:
+; CHECK-NEXT:    .short 0x5300 @ half 56
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast uge half 34., %0
@@ -343,11 +448,19 @@ entry:
 
 define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) {
 ; CHECK-LABEL: fp16_vmaxnm_NNNu_rev:
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S0:s[0-9]]], r{{.}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
-; CHECK:    vldr.16 s2, .LCPI{{.*}}
-; CHECK:    vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s2, .LCPI23_0
+; CHECK-NEXT:    vmov.f16 s0, r0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, .LCPI23_1
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .short 0x5300 @ half 56
+; CHECK-NEXT:  .LCPI23_1:
+; CHECK-NEXT:    .short 0x54e0 @ half 78
 entry:
   %0 = bitcast i16 %b to half
   %cmp1 = fcmp fast ult half 56., %0
@@ -359,10 +472,16 @@ entry:
 
 define half @fp16_vminmaxnm_0(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminmaxnm_0:
-; CHECK:    vldr.16 s0, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s2, s2, s0
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s0, .LCPI24_0
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s2, s2, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI24_0:
+; CHECK-NEXT:    .short 0x0000 @ half 0
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast olt half %0, 0.
@@ -374,10 +493,16 @@ entry:
 
 define half @fp16_vminmaxnm_neg0(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminmaxnm_neg0:
-; CHECK:    vldr.16 s0, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s2, s2, s0
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s0, .LCPI25_0
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s2, s2, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI25_0:
+; CHECK-NEXT:    .short 0x8000 @ half -0
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast olt half %0, -0.
@@ -389,10 +514,16 @@ entry:
 
 define half @fp16_vminmaxnm_e_0(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminmaxnm_e_0:
-; CHECK:    vldr.16 s0, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s2, s2, s0
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s0, .LCPI26_0
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s2, s2, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI26_0:
+; CHECK-NEXT:    .short 0x0000 @ half 0
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast ule half 0., %0
@@ -404,10 +535,16 @@ entry:
 
 define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
 ; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
-; CHECK:    vldr.16 s0, .LCPI{{.*}}
-; CHECK:    vmov.f16 [[S2:s[0-9]]], r{{.}}
-; CHECK:    vminnm.f16 s2, s2, s0
-; CHECK:    vmaxnm.f16 s0, [[S2]], [[S0]]
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s0, .LCPI27_0
+; CHECK-NEXT:    vmov.f16 s2, r0
+; CHECK-NEXT:    vminnm.f16 s2, s2, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .short 0x8000 @ half -0
 entry:
   %0 = bitcast i16 %a to half
   %cmp1 = fcmp fast ule half -0., %0

diff --git a/llvm/test/CodeGen/ARM/fp16.ll b/llvm/test/CodeGen/ARM/fp16.ll
deleted file mode 100644
index 9ff701050ac7e..0000000000000
--- a/llvm/test/CodeGen/ARM/fp16.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc -mtriple=armv7a--none-eabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-EABI %s
-; RUN: llc -mtriple=armv7a--none-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s
-; RUN: llc -mtriple=armv7a--none-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s
-; RUN: llc -mtriple=armv8-eabihf < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARMV8 %s
-; RUN: llc -mtriple=thumbv7m-eabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-EABI %s
-; RUN: llc -mtriple=thumbv7m-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s
-; RUN: llc -mtriple=thumbv7m-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s
-
-;; +fp16 is special: it has f32->f16 (unlike v7), but not f64->f16 (unlike v8).
-;; This exposes unsafe-fp-math optimization opportunities; test that.
-; RUN: llc -mattr=+vfp3,+fp16 < %s |\
-; RUN:   FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-SAFE %s
-; RUN: llc -mattr=+vfp3,+fp16 < %s -enable-unsafe-fp-math |\
-; RUN:   FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-UNSAFE %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32"
-target triple = "armv7---eabihf"
-
- at x = global i16 12902
- at y = global i16 0
- at z = common global i16 0
-
-define void @foo() nounwind {
-; CHECK-LABEL: foo:
-entry:
-  %0 = load i16, ptr @x, align 2
-  %1 = load i16, ptr @y, align 2
-  %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0)
-; CHECK-HARDFLOAT-EABI: __aeabi_h2f
-; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-ARMV8: vcvtb.f32.f16
-; CHECK-SOFTFLOAT-EABI: __aeabi_h2f
-; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee
-  %3 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
-; CHECK-HARDFLOAT-EABI: __aeabi_h2f
-; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-ARMV8: vcvtb.f32.f16
-; CHECK-SOFTFLOAT-EABI: __aeabi_h2f
-; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee
-  %4 = fadd float %2, %3
-  %5 = tail call i16 @llvm.convert.to.fp16.f32(float %4)
-; CHECK-HARDFLOAT-EABI: __aeabi_f2h
-; CHECK-HARDFLOAT-GNU: __gnu_f2h_ieee
-; CHECK-FP16: vcvtb.f16.f32
-; CHECK-ARMV8: vcvtb.f16.f32
-; CHECK-SOFTFLOAT-EABI: __aeabi_f2h
-; CHECK-SOFTFLOAT-GNU: __gnu_f2h_ieee
-  store i16 %5, ptr @x, align 2
-  ret void
-}
-
-define double @test_from_fp16(i16 %in) {
-; CHECK-LABEL: test_from_fp16:
-  %val = call double @llvm.convert.from.fp16.f64(i16 %in)
-; CHECK-HARDFLOAT-EABI: bl __aeabi_h2f
-; CHECK-HARDFLOAT-EABI: vmov [[TMP:s[0-9]+]], r0
-; CHECK-HARDFLOAT-EABI: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]]
-
-; CHECK-HARDFLOAT-GNU: bl __gnu_h2f_ieee
-; CHECK-HARDFLOAT-GNU: vmov [[TMP:s[0-9]+]], r0
-; CHECK-HARDFLOAT-GNU: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]]
-
-; CHECK-FP16: vmov [[TMP16:s[0-9]+]], r0
-; CHECK-FP16: vcvtb.f32.f16 [[TMP32:s[0-9]+]], [[TMP16]]
-; CHECK-FP16: vcvt.f64.f32 d0, [[TMP32]]
-
-; CHECK-ARMV8: vmov [[TMP:s[0-9]+]], r0
-; CHECK-ARMV8: vcvtb.f64.f16 d0, [[TMP]]
-
-; CHECK-SOFTFLOAT-EABI: bl __aeabi_h2f
-; CHECK-SOFTFLOAT-EABI: bl __aeabi_f2d
-
-; CHECK-SOFTFLOAT-GNU: bl __gnu_h2f_ieee
-; CHECK-SOFTFLOAT-GNU: bl __aeabi_f2d
-  ret double %val
-}
-
-define i16 @test_to_fp16(double %in) {
-; CHECK-LABEL: test_to_fp16:
-  %val = call i16 @llvm.convert.to.fp16.f64(double %in)
-; CHECK-HARDFLOAT-EABI: bl __aeabi_d2h
-
-; CHECK-HARDFLOAT-GNU: bl __aeabi_d2h
-
-; CHECK-FP16-SAFE: bl __aeabi_d2h
-
-; CHECK-FP16-UNSAFE:      vmov r0, r1, d0
-; CHECK-FP16-UNSAFE-NEXT: bl __aeabi_d2h
-
-; CHECK-ARMV8: vcvtb.f16.f64 [[TMP:s[0-9]+]], d0
-; CHECK-ARMV8: vmov r0, [[TMP]]
-
-; CHECK-SOFTFLOAT-EABI: bl __aeabi_d2h
-
-; CHECK-SOFTFLOAT-GNU: bl __aeabi_d2h
-  ret i16 %val
-}
-
-declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone
-declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone
-
-declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone
-declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone

diff --git a/llvm/test/CodeGen/ARM/sincos.ll b/llvm/test/CodeGen/ARM/sincos.ll
index dc8fdf69ca610..e1b683a8a6657 100644
--- a/llvm/test/CodeGen/ARM/sincos.ll
+++ b/llvm/test/CodeGen/ARM/sincos.ll
@@ -1,8 +1,7 @@
 ; RUN: llc < %s -mtriple=armv7-apple-ios6 -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT
 ; RUN: llc < %s -mtriple=armv7-apple-ios7 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS
 ; RUN: llc < %s -mtriple=armv7-linux-gnu -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 \
-; RUN:   --enable-unsafe-fp-math | FileCheck %s --check-prefix=SINCOS-GNU
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU
 ; RUN: llc < %s -mtriple=armv7-linux-android -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT-ANDROID
 ; RUN: llc < %s -mtriple=armv7-linux-android9 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU
 
@@ -33,6 +32,28 @@ entry:
   ret float %add
 }
 
+define float @test1_fast(float %x) nounwind {
+entry:
+; SINCOS-LABEL: test1_fast:
+; SINCOS: bl ___sincosf_stret
+
+; SINCOS-GNU-LABEL: test1_fast:
+; SINCOS-GNU: bl sincosf
+
+; NOOPT-LABEL: test1_fast:
+; NOOPT: bl _sinf
+; NOOPT: bl _cosf
+
+; NOOPT-ANDROID-LABEL: test1_fast:
+; NOOPT-ANDROID: bl sinf
+; NOOPT-ANDROID: bl cosf
+
+  %call = tail call fast float @sinf(float %x) readnone
+  %call1 = tail call fast float @cosf(float %x) readnone
+  %add = fadd float %call, %call1
+  ret float %add
+}
+
 define float @test1_errno(float %x) nounwind {
 entry:
 ; SINCOS-LABEL: test1_errno:
@@ -79,6 +100,28 @@ entry:
   ret double %add
 }
 
+define double @test2_fast(double %x) nounwind {
+entry:
+; SINCOS-LABEL: test2_fast:
+; SINCOS: bl ___sincos_stret
+
+; SINCOS-GNU-LABEL: test2_fast:
+; SINCOS-GNU: bl sincos
+
+; NOOPT-LABEL: test2_fast:
+; NOOPT: bl _sin
+; NOOPT: bl _cos
+
+; NOOPT-ANDROID-LABEL: test2_fast:
+; NOOPT-ANDROID: bl sin
+; NOOPT-ANDROID: bl cos
+
+  %call = tail call fast double @sin(double %x) readnone
+  %call1 = tail call fast double @cos(double %x) readnone
+  %add = fadd double %call, %call1
+  ret double %add
+}
+
 define double @test2_errno(double %x) nounwind {
 entry:
 ; SINCOS-LABEL: test2_errno:

diff --git a/llvm/test/CodeGen/ARM/vminmaxnm.ll b/llvm/test/CodeGen/ARM/vminmaxnm.ll
index bb3ea3067541e..be33dbfc61b04 100644
--- a/llvm/test/CodeGen/ARM/vminmaxnm.ll
+++ b/llvm/test/CodeGen/ARM/vminmaxnm.ll
@@ -1,146 +1,163 @@
-; RUN: llc < %s -mtriple armv8 -mattr=+neon,+fp-armv8 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple armv8-none-eabihf -mattr=+neon,+fp-armv8 -enable-no-nans-fp-math | FileCheck %s
 
 ; scalars
 
-define float @fp-armv8_vminnm_o(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_o":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_o(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vminnm_o:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast olt float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define double @fp-armv8_vminnm_ole(double %a, double %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_ole":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_ole(double %a, double %b) {
+; CHECK-LABEL: fparmv8_vminnm_ole:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ole double %a, %b
   %cond = select nsz i1 %cmp, double %a, double %b
   ret double %cond
 }
 
-define float @fp-armv8_vminnm_o_rev(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_o_rev":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_o_rev(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vminnm_o_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ogt float %a, %b
   %cond = select nsz i1 %cmp, float %b, float %a
   ret float %cond
 }
 
-define double @fp-armv8_vminnm_oge_rev(double %a, double %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_oge_rev":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_oge_rev(double %a, double %b) {
+; CHECK-LABEL: fparmv8_vminnm_oge_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast oge double %a, %b
   %cond = select nsz i1 %cmp, double %b, double %a
   ret double %cond
 }
 
-define float @fp-armv8_vminnm_u(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_u":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_u(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vminnm_u:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ult float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vminnm_ule(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_ule":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_ule(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vminnm_ule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ule float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vminnm_u_rev(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_u_rev":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_u_rev(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vminnm_u_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ugt float %a, %b
   %cond = select nsz i1 %cmp, float %b, float %a
   ret float %cond
 }
 
-define double @fp-armv8_vminnm_uge_rev(double %a, double %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_uge_rev":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_uge_rev(double %a, double %b) {
+; CHECK-LABEL: fparmv8_vminnm_uge_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast uge double %a, %b
   %cond = select nsz i1 %cmp, double %b, double %a
   ret double %cond
 }
 
-define float @fp-armv8_vmaxnm_o(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_o":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_o(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_o:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ogt float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_oge(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_oge":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_oge(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_oge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast oge float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_o_rev(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_o_rev":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_o_rev(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_o_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast olt float %a, %b
   %cond = select nsz i1 %cmp, float %b, float %a
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_ole_rev(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_ole_rev":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_ole_rev(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_ole_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ole float %a, %b
   %cond = select nsz i1 %cmp, float %b, float %a
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_u(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_u":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_u(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_u:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ugt float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_uge(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_uge":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_uge(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_uge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast uge float %a, %b
   %cond = select nsz i1 %cmp, float %a, float %b
   ret float %cond
 }
 
-define float @fp-armv8_vmaxnm_u_rev(float %a, float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_u_rev":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_u_rev(float %a, float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_u_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ult float %a, %b
   %cond = select nsz i1 %cmp, float %b, float %a
   ret float %cond
 }
 
-define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_ule_rev":
-; CHECK-NOT: vcmp
-; CHECK: vmaxnm.f64
+define double @fparmv8_vmaxnm_ule_rev(double %a, double %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_ule_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
   %cmp = fcmp fast ule double %a, %b
   %cond = select nsz i1 %cmp, double %b, double %a
   ret double %cond
@@ -148,10 +165,18 @@ define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) {
 
 ; known non-NaNs
 
-define float @fp-armv8_vminnm_NNNo(float %a) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNo":
-; CHECK: vminnm.f32
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_NNNo(float %a) {
+; CHECK-LABEL: fparmv8_vminnm_NNNo:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, #1.200000e+01
+; CHECK-NEXT:    vldr s4, .LCPI16_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
   %cmp1 = fcmp fast olt float %a, 12.
   %cond1 = select nsz i1 %cmp1, float %a, float 12.
   %cmp2 = fcmp fast olt float 34., %cond1
@@ -159,10 +184,22 @@ define float @fp-armv8_vminnm_NNNo(float %a) {
   ret float %cond2
 }
 
-define double @fp-armv8_vminnm_NNNole(double %a) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNole":
-; CHECK: vminnm.f64
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_NNNole(double %a) {
+; CHECK-LABEL: fparmv8_vminnm_NNNole:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, .LCPI17_0
+; CHECK-NEXT:    vldr d17, .LCPI17_1
+; CHECK-NEXT:    vminnm.f64 d16, d0, d16
+; CHECK-NEXT:    vminnm.f64 d0, d16, d17
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI17_0:
+; CHECK-NEXT:    .long 0 @ double 34
+; CHECK-NEXT:    .long 1078001664
+; CHECK-NEXT:  .LCPI17_1:
+; CHECK-NEXT:    .long 0 @ double 56
+; CHECK-NEXT:    .long 1078722560
   %cmp1 = fcmp fast ole double %a, 34.
   %cond1 = select nsz i1 %cmp1, double %a, double 34.
   %cmp2 = fcmp fast ole double 56., %cond1
@@ -170,10 +207,20 @@ define double @fp-armv8_vminnm_NNNole(double %a) {
   ret double %cond2
 }
 
-define float @fp-armv8_vminnm_NNNo_rev(float %a) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNo_rev":
-; CHECK: vminnm.f32
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_NNNo_rev(float %a) {
+; CHECK-LABEL: fparmv8_vminnm_NNNo_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI18_0
+; CHECK-NEXT:    vldr s4, .LCPI18_1
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
+; CHECK-NEXT:  .LCPI18_1:
+; CHECK-NEXT:    .long 0x429c0000 @ float 78
   %cmp1 = fcmp fast ogt float %a, 56.
   %cond1 = select nsz i1 %cmp1, float 56., float %a
   %cmp2 = fcmp fast ogt float 78., %cond1
@@ -181,10 +228,22 @@ define float @fp-armv8_vminnm_NNNo_rev(float %a) {
   ret float %cond2
 }
 
-define double @fp-armv8_vminnm_NNNoge_rev(double %a) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNoge_rev":
-; CHECK: vminnm.f64
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_NNNoge_rev(double %a) {
+; CHECK-LABEL: fparmv8_vminnm_NNNoge_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, .LCPI19_0
+; CHECK-NEXT:    vldr d17, .LCPI19_1
+; CHECK-NEXT:    vminnm.f64 d16, d0, d16
+; CHECK-NEXT:    vminnm.f64 d0, d16, d17
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 0 @ double 78
+; CHECK-NEXT:    .long 1079214080
+; CHECK-NEXT:  .LCPI19_1:
+; CHECK-NEXT:    .long 0 @ double 90
+; CHECK-NEXT:    .long 1079410688
   %cmp1 = fcmp fast oge double %a, 78.
   %cond1 = select nsz i1 %cmp1, double 78., double %a
   %cmp2 = fcmp fast oge double 90., %cond1
@@ -192,10 +251,18 @@ define double @fp-armv8_vminnm_NNNoge_rev(double %a) {
   ret double %cond2
 }
 
-define float @fp-armv8_vminnm_NNNu(float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNu":
-; CHECK: vminnm.f32
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_NNNu(float %b) {
+; CHECK-LABEL: fparmv8_vminnm_NNNu:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, #1.200000e+01
+; CHECK-NEXT:    vldr s4, .LCPI20_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
   %cmp1 = fcmp fast ult float 12., %b
   %cond1 = select nsz i1 %cmp1, float 12., float %b
   %cmp2 = fcmp fast ult float %cond1, 34.
@@ -203,10 +270,20 @@ define float @fp-armv8_vminnm_NNNu(float %b) {
   ret float %cond2
 }
 
-define float @fp-armv8_vminnm_NNNule(float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNule":
-; CHECK: vminnm.f32
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_NNNule(float %b) {
+; CHECK-LABEL: fparmv8_vminnm_NNNule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI21_0
+; CHECK-NEXT:    vldr s4, .LCPI21_1
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
+; CHECK-NEXT:  .LCPI21_1:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
   %cmp1 = fcmp fast ule float 34., %b
   %cond1 = select nsz i1 %cmp1, float 34., float %b
   %cmp2 = fcmp fast ule float %cond1, 56.
@@ -214,10 +291,20 @@ define float @fp-armv8_vminnm_NNNule(float %b) {
   ret float %cond2
 }
 
-define float @fp-armv8_vminnm_NNNu_rev(float %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNu_rev":
-; CHECK: vminnm.f32
-; CHECK: vminnm.f32
+define float @fparmv8_vminnm_NNNu_rev(float %b) {
+; CHECK-LABEL: fparmv8_vminnm_NNNu_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI22_0
+; CHECK-NEXT:    vldr s4, .LCPI22_1
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
+; CHECK-NEXT:  .LCPI22_1:
+; CHECK-NEXT:    .long 0x429c0000 @ float 78
   %cmp1 = fcmp fast ugt float 56., %b
   %cond1 = select nsz i1 %cmp1, float %b, float 56.
   %cmp2 = fcmp fast ugt float %cond1, 78.
@@ -225,10 +312,22 @@ define float @fp-armv8_vminnm_NNNu_rev(float %b) {
   ret float %cond2
 }
 
-define double @fp-armv8_vminnm_NNNuge_rev(double %b) {
-; CHECK-LABEL: "fp-armv8_vminnm_NNNuge_rev":
-; CHECK: vminnm.f64
-; CHECK: vminnm.f64
+define double @fparmv8_vminnm_NNNuge_rev(double %b) {
+; CHECK-LABEL: fparmv8_vminnm_NNNuge_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, .LCPI23_0
+; CHECK-NEXT:    vldr d17, .LCPI23_1
+; CHECK-NEXT:    vminnm.f64 d16, d0, d16
+; CHECK-NEXT:    vminnm.f64 d0, d16, d17
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .long 0 @ double 78
+; CHECK-NEXT:    .long 1079214080
+; CHECK-NEXT:  .LCPI23_1:
+; CHECK-NEXT:    .long 0 @ double 90
+; CHECK-NEXT:    .long 1079410688
   %cmp1 = fcmp fast uge double 78., %b
   %cond1 = select nsz i1 %cmp1, double %b, double 78.
   %cmp2 = fcmp fast uge double %cond1, 90.
@@ -236,10 +335,18 @@ define double @fp-armv8_vminnm_NNNuge_rev(double %b) {
   ret double %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNo(float %a) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNo(float %a) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNo:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, #1.200000e+01
+; CHECK-NEXT:    vldr s4, .LCPI24_0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI24_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
   %cmp1 = fcmp fast ogt float %a, 12.
   %cond1 = select nsz i1 %cmp1, float %a, float 12.
   %cmp2 = fcmp fast ogt float 34., %cond1
@@ -247,10 +354,20 @@ define float @fp-armv8_vmaxnm_NNNo(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNoge(float %a) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNoge":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNoge(float %a) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNoge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI25_0
+; CHECK-NEXT:    vldr s4, .LCPI25_1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI25_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
+; CHECK-NEXT:  .LCPI25_1:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
   %cmp1 = fcmp fast oge float %a, 34.
   %cond1 = select nsz i1 %cmp1, float %a, float 34.
   %cmp2 = fcmp fast oge float 56., %cond1
@@ -258,10 +375,20 @@ define float @fp-armv8_vmaxnm_NNNoge(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNo_rev(float %a) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo_rev":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNo_rev(float %a) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNo_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI26_0
+; CHECK-NEXT:    vldr s4, .LCPI26_1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI26_0:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
+; CHECK-NEXT:  .LCPI26_1:
+; CHECK-NEXT:    .long 0x429c0000 @ float 78
   %cmp1 = fcmp fast olt float %a, 56.
   %cond1 = select nsz i1 %cmp1, float 56., float %a
   %cmp2 = fcmp fast olt float 78., %cond1
@@ -269,10 +396,20 @@ define float @fp-armv8_vmaxnm_NNNo_rev(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNole_rev(float %a) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNole_rev":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNole_rev(float %a) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNole_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI27_0
+; CHECK-NEXT:    vldr s4, .LCPI27_1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .long 0x429c0000 @ float 78
+; CHECK-NEXT:  .LCPI27_1:
+; CHECK-NEXT:    .long 0x42b40000 @ float 90
   %cmp1 = fcmp fast ole float %a, 78.
   %cond1 = select nsz i1 %cmp1, float 78., float %a
   %cmp2 = fcmp fast ole float 90., %cond1
@@ -280,10 +417,18 @@ define float @fp-armv8_vmaxnm_NNNole_rev(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNu(float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNu(float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNu:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, #1.200000e+01
+; CHECK-NEXT:    vldr s4, .LCPI28_0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI28_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
   %cmp1 = fcmp fast ugt float 12., %b
   %cond1 = select nsz i1 %cmp1, float 12., float %b
   %cmp2 = fcmp fast ugt float %cond1, 34.
@@ -291,10 +436,20 @@ define float @fp-armv8_vmaxnm_NNNu(float %b) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNuge(float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNuge":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNuge(float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNuge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI29_0
+; CHECK-NEXT:    vldr s4, .LCPI29_1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI29_0:
+; CHECK-NEXT:    .long 0x42080000 @ float 34
+; CHECK-NEXT:  .LCPI29_1:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
   %cmp1 = fcmp fast uge float 34., %b
   %cond1 = select nsz i1 %cmp1, float 34., float %b
   %cmp2 = fcmp fast uge float %cond1, 56.
@@ -302,10 +457,20 @@ define float @fp-armv8_vmaxnm_NNNuge(float %b) {
   ret float %cond2
 }
 
-define float @fp-armv8_vmaxnm_NNNu_rev(float %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu_rev":
-; CHECK: vmaxnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vmaxnm_NNNu_rev(float %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNu_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI30_0
+; CHECK-NEXT:    vldr s4, .LCPI30_1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI30_0:
+; CHECK-NEXT:    .long 0x42600000 @ float 56
+; CHECK-NEXT:  .LCPI30_1:
+; CHECK-NEXT:    .long 0x429c0000 @ float 78
   %cmp1 = fcmp fast ult float 56., %b
   %cond1 = select nsz i1 %cmp1, float %b, float 56.
   %cmp2 = fcmp fast ult float %cond1, 78.
@@ -313,10 +478,22 @@ define float @fp-armv8_vmaxnm_NNNu_rev(float %b) {
   ret float %cond2
 }
 
-define double @fp-armv8_vmaxnm_NNNule_rev( double %b) {
-; CHECK-LABEL: "fp-armv8_vmaxnm_NNNule_rev":
-; CHECK: vmaxnm.f64
-; CHECK: vmaxnm.f64
+define double @fparmv8_vmaxnm_NNNule_rev( double %b) {
+; CHECK-LABEL: fparmv8_vmaxnm_NNNule_rev:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, .LCPI31_0
+; CHECK-NEXT:    vldr d17, .LCPI31_1
+; CHECK-NEXT:    vmaxnm.f64 d16, d0, d16
+; CHECK-NEXT:    vmaxnm.f64 d0, d16, d17
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI31_0:
+; CHECK-NEXT:    .long 0 @ double 78
+; CHECK-NEXT:    .long 1079214080
+; CHECK-NEXT:  .LCPI31_1:
+; CHECK-NEXT:    .long 0 @ double 90
+; CHECK-NEXT:    .long 1079410688
   %cmp1 = fcmp fast ule double 78., %b
   %cond1 = select nsz i1 %cmp1, double %b, double 78.
   %cmp2 = fcmp fast ule double %cond1, 90.
@@ -324,11 +501,17 @@ define double @fp-armv8_vmaxnm_NNNule_rev( double %b) {
   ret double %cond2
 }
 
-define float @fp-armv8_vminmaxnm_0(float %a) {
-; CHECK-LABEL: "fp-armv8_vminmaxnm_0":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vminmaxnm_0(float %a) {
+; CHECK-LABEL: fparmv8_vminmaxnm_0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI32_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI32_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
   %cmp1 = fcmp fast olt float %a, 0.
   %cond1 = select nsz i1 %cmp1, float %a, float 0.
   %cmp2 = fcmp fast ogt float %cond1, 0.
@@ -336,11 +519,17 @@ define float @fp-armv8_vminmaxnm_0(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vminmaxnm_neg0(float %a) {
-; CHECK-LABEL: "fp-armv8_vminmaxnm_neg0":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vminmaxnm_neg0(float %a) {
+; CHECK-LABEL: fparmv8_vminmaxnm_neg0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI33_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI33_0:
+; CHECK-NEXT:    .long 0x80000000 @ float -0
   %cmp1 = fcmp fast olt float %a, -0.
   %cond1 = select nsz i1 %cmp1, float %a, float -0.
   %cmp2 = fcmp fast ugt float %cond1, -0.
@@ -348,11 +537,17 @@ define float @fp-armv8_vminmaxnm_neg0(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vminmaxnm_e_0(float %a) {
-; CHECK-LABEL: "fp-armv8_vminmaxnm_e_0":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vminmaxnm_e_0(float %a) {
+; CHECK-LABEL: fparmv8_vminmaxnm_e_0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI34_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI34_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
   %cmp1 = fcmp fast ule float 0., %a
   %cond1 = select nsz i1 %cmp1, float 0., float %a
   %cmp2 = fcmp fast uge float 0., %cond1
@@ -360,11 +555,17 @@ define float @fp-armv8_vminmaxnm_e_0(float %a) {
   ret float %cond2
 }
 
-define float @fp-armv8_vminmaxnm_e_neg0(float %a) {
-; CHECK-LABEL: "fp-armv8_vminmaxnm_e_neg0":
-; CHECK-NOT: vcmp
-; CHECK: vminnm.f32
-; CHECK: vmaxnm.f32
+define float @fparmv8_vminmaxnm_e_neg0(float %a) {
+; CHECK-LABEL: fparmv8_vminmaxnm_e_neg0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s2, .LCPI35_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI35_0:
+; CHECK-NEXT:    .long 0x80000000 @ float -0
   %cmp1 = fcmp fast ule float -0., %a
   %cond1 = select nsz i1 %cmp1, float -0., float %a
   %cmp2 = fcmp fast oge float -0., %cond1


        

