[llvm] 5a25399 - [ARM] Add and update FMA tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 5 03:55:45 PST 2020


Author: David Green
Date: 2020-01-05T11:24:04Z
New Revision: 5a253992212451be919978610332cc42d4b48d47

URL: https://github.com/llvm/llvm-project/commit/5a253992212451be919978610332cc42d4b48d47
DIFF: https://github.com/llvm/llvm-project/commit/5a253992212451be919978610332cc42d4b48d47.diff

LOG: [ARM] Add and update FMA tests. NFC
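
For context, the pattern these tests exercise is the contraction of a separate
multiply and add into a single fused multiply-accumulate. A minimal sketch of
the fp16 case (the function name and register-argument form here are
illustrative only and not part of the commit; the committed tests load the
operands through half* pointers instead):

  ; With -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16 -fp-contract=fast
  ; this fmul/fadd pair is expected to be selected as a single vfma.f16.
  ; With +slowfpvmlx (the DONT-FUSE runs) it should stay as separate
  ; vmul.f16/vadd.f16 instructions. Exact register assignment depends on
  ; the calling convention and register allocation.
  define arm_aapcs_vfpcc half @contract_example(half %a, half %b, half %c) {
    %m = fmul half %a, %b
    %r = fadd half %m, %c
    ret half %r
  }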

Added: 
    llvm/test/CodeGen/ARM/fp16-fusedMAC.ll

Modified: 
    llvm/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
    llvm/test/CodeGen/ARM/fusedMAC.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/cortex-a57-misched-vfma.ll b/llvm/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
index a3e07ba17b9a..1eb7e4237706 100644
--- a/llvm/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
+++ b/llvm/test/CodeGen/ARM/cortex-a57-misched-vfma.ll
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null -fp-contract=fast | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FAST
 ; Check latencies of vmul/vfma accumulate chains.
 
-define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
+define arm_aapcs_vfpcc float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test1:%bb.0
 
@@ -42,7 +42,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float
 }
 
 ; ASIMD form
-define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
+define arm_aapcs_vfpcc <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test2:%bb.0
 
@@ -80,7 +80,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
   ret <2 x float> %add2
 }
 
-define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
+define arm_aapcs_vfpcc float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test3:%bb.0
 
@@ -119,7 +119,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float
 }
 
 ; ASIMD form
-define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
+define arm_aapcs_vfpcc <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test4:%bb.0
 
@@ -157,7 +157,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2
   ret <2 x float> %sub2
 }
 
-define float @Test5(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @Test5(float %f1, float %f2, float %f3) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test5:%bb.0
 
@@ -176,7 +176,7 @@ define float @Test5(float %f1, float %f2, float %f3) {
 }
 
 
-define float @Test6(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @Test6(float %f1, float %f2, float %f3) {
 ; CHECK:       ********** MI Scheduling **********
 ; CHECK:       Test6:%bb.0
 

diff --git a/llvm/test/CodeGen/ARM/fp16-fusedMAC.ll b/llvm/test/CodeGen/ARM/fp16-fusedMAC.ll
new file mode 100644
index 000000000000..fea408a01a42
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/fp16-fusedMAC.ll
@@ -0,0 +1,449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16 -fp-contract=fast | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8.1-m-none-eabi -mattr=+fullfp16,+slowfpvmlx -fp-contract=fast | FileCheck %s -check-prefix=DONT-FUSE
+
+; Check generated fp16 fused MAC and MLS.
+
+define arm_aapcs_vfpcc void @fusedMACTest2(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fusedMACTest2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fusedMACTest2:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vmul.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vadd.f16 s0, s0, s2
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %1 = fmul half %f1, %f2
+  %2 = fadd half %1, %f3
+  store half %2, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fusedMACTest4(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fusedMACTest4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r2]
+; CHECK-NEXT:    vldr.16 s2, [r1]
+; CHECK-NEXT:    vldr.16 s4, [r0]
+; CHECK-NEXT:    vfms.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fusedMACTest4:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r2]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r1]
+; DONT-FUSE-NEXT:    vmul.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vsub.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %1 = fmul half %f2, %f3
+  %2 = fsub half %f1, %1
+  store half %2, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fusedMACTest6(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fusedMACTest6:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfnma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fusedMACTest6:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vnmul.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vsub.f16 s0, s0, s2
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %1 = fmul half %f1, %f2
+  %2 = fsub half -0.0, %1
+  %3 = fsub half %2, %f3
+  store half %3, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fusedMACTest8(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fusedMACTest8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfnms.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fusedMACTest8:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vmul.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vsub.f16 s0, s0, s2
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %1 = fmul half %f1, %f2
+  %2 = fsub half %1, %f3
+  store half %2, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @test_fma_f16(half *%aa, half *%bb, half *%cc) nounwind readnone ssp {
+; CHECK-LABEL: test_fma_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: test_fma_f16:
+; DONT-FUSE:       @ %bb.0: @ %entry
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r2]
+; DONT-FUSE-NEXT:    vfma.f16 s4, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s4, [r0]
+; DONT-FUSE-NEXT:    bx lr
+entry:
+  %a = load half, half *%aa, align 2
+  %b = load half, half *%bb, align 2
+  %c = load half, half *%cc, align 2
+  %tmp1 = tail call half @llvm.fma.f16(half %a, half %b, half %c) nounwind readnone
+  store half %tmp1, half *%aa, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @test_fnms_f16(half *%aa, half *%bb, half *%cc) nounwind readnone ssp {
+; CHECK-LABEL: test_fnms_f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: test_fnms_f16:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r2]
+; DONT-FUSE-NEXT:    vfma.f16 s4, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s4, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %a = load half, half *%aa, align 2
+  %b = load half, half *%bb, align 2
+  %c = load half, half *%cc, align 2
+  %tmp2 = fsub half -0.0, %c
+  %tmp3 = tail call half @llvm.fma.f16(half %a, half %b, half %c) nounwind readnone
+  store half %tmp3, half *%aa, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @test_fma_const_fold(half *%aa, half *%bb) nounwind {
+; CHECK-LABEL: test_fma_const_fold:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vadd.f16 s0, s2, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: test_fma_const_fold:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vadd.f16 s0, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %a = load half, half *%aa, align 2
+  %b = load half, half *%bb, align 2
+  %ret = call half @llvm.fma.f16(half %a, half 1.0, half %b)
+  store half %ret, half *%aa, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @test_fma_canonicalize(half *%aa, half *%bb) nounwind {
+; CHECK-LABEL: test_fma_canonicalize:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r0]
+; CHECK-NEXT:    vldr.16 s2, [r1]
+; CHECK-NEXT:    vmov.f16 s4, #2.000000e+00
+; CHECK-NEXT:    vfma.f16 s2, s0, s4
+; CHECK-NEXT:    vstr.16 s2, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: test_fma_canonicalize:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r1]
+; DONT-FUSE-NEXT:    vmov.f16 s4, #2.000000e+00
+; DONT-FUSE-NEXT:    vfma.f16 s2, s0, s4
+; DONT-FUSE-NEXT:    vstr.16 s2, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %a = load half, half *%aa, align 2
+  %b = load half, half *%bb, align 2
+  %ret = call half @llvm.fma.f16(half 2.0, half %a, half %b)
+  store half %ret, half *%aa, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fms1(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fms1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r2]
+; CHECK-NEXT:    vldr.16 s4, [r0]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s2, s4, s0
+; CHECK-NEXT:    vstr.16 s2, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fms1:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r0]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s2, s4, s0
+; DONT-FUSE-NEXT:    vstr.16 s2, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %s = fsub half -0.0, %f1
+  %ret = call half @llvm.fma.f16(half %s, half %f2, half %f3)
+  store half %ret, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fms2(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fms2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r2]
+; CHECK-NEXT:    vldr.16 s4, [r0]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s2, s0, s4
+; CHECK-NEXT:    vstr.16 s2, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fms2:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r0]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s2, s0, s4
+; DONT-FUSE-NEXT:    vstr.16 s2, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %s = fsub half -0.0, %f1
+  %ret = call half @llvm.fma.f16(half %f2, half %s, half %f3)
+  store half %ret, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fnma1(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fnma1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vneg.f16 s0, s4
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fnma1:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r2]
+; DONT-FUSE-NEXT:    vfma.f16 s4, s2, s0
+; DONT-FUSE-NEXT:    vneg.f16 s0, s4
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %fma = call half @llvm.fma.f16(half %f1, half %f2, half %f3)
+  %n1 = fsub half -0.0, %fma
+  store half %n1, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fnma2(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fnma2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vneg.f16 s2, s2
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fnma2:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vneg.f16 s2, s2
+; DONT-FUSE-NEXT:    vldr.16 s4, [r2]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s4, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s4, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %n1 = fsub half -0.0, %f1
+  %n3 = fsub half -0.0, %f3
+  %ret = call half @llvm.fma.f16(half %n1, half %f2, half %n3)
+  store half %ret, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fnms1(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fnms1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vldr.16 s4, [r2]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s4, s2, s0
+; CHECK-NEXT:    vstr.16 s4, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fnms1:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r2]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s4, s2, s0
+; DONT-FUSE-NEXT:    vstr.16 s4, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %n3 = fsub half -0.0, %f3
+  %ret = call half @llvm.fma.f16(half %f1, half %f2, half %n3)
+  store half %ret, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fnms2(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fnms2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r1]
+; CHECK-NEXT:    vldr.16 s2, [r2]
+; CHECK-NEXT:    vldr.16 s4, [r0]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s2, s4, s0
+; CHECK-NEXT:    vneg.f16 s0, s2
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fnms2:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r1]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r0]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s2, s4, s0
+; DONT-FUSE-NEXT:    vneg.f16 s0, s2
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %n1 = fsub half -0.0, %f1
+  %fma = call half @llvm.fma.f16(half %n1, half %f2, half %f3)
+  %n = fsub half -0.0, %fma
+  store half %n, half *%a1, align 2
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fnms3(half *%a1, half *%a2, half *%a3) {
+; CHECK-LABEL: fnms3:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr.16 s0, [r0]
+; CHECK-NEXT:    vldr.16 s2, [r2]
+; CHECK-NEXT:    vldr.16 s4, [r1]
+; CHECK-NEXT:    vneg.f16 s4, s4
+; CHECK-NEXT:    vfma.f16 s2, s0, s4
+; CHECK-NEXT:    vneg.f16 s0, s2
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+;
+; DONT-FUSE-LABEL: fnms3:
+; DONT-FUSE:       @ %bb.0:
+; DONT-FUSE-NEXT:    vldr.16 s0, [r0]
+; DONT-FUSE-NEXT:    vldr.16 s2, [r2]
+; DONT-FUSE-NEXT:    vldr.16 s4, [r1]
+; DONT-FUSE-NEXT:    vneg.f16 s4, s4
+; DONT-FUSE-NEXT:    vfma.f16 s2, s0, s4
+; DONT-FUSE-NEXT:    vneg.f16 s0, s2
+; DONT-FUSE-NEXT:    vstr.16 s0, [r0]
+; DONT-FUSE-NEXT:    bx lr
+
+  %f1 = load half, half *%a1, align 2
+  %f2 = load half, half *%a2, align 2
+  %f3 = load half, half *%a3, align 2
+  %n2 = fsub half -0.0, %f2
+  %fma = call half @llvm.fma.f16(half %f1, half %n2, half %f3)
+  %n1 = fsub half -0.0, %fma
+  store half %n1, half *%a1, align 2
+  ret void
+}
+
+
+declare half @llvm.fma.f16(half, half, half) nounwind readnone

diff --git a/llvm/test/CodeGen/ARM/fusedMAC.ll b/llvm/test/CodeGen/ARM/fusedMAC.ll
index 6b922895b006..493e71c9f627 100644
--- a/llvm/test/CodeGen/ARM/fusedMAC.ll
+++ b/llvm/test/CodeGen/ARM/fusedMAC.ll
@@ -5,7 +5,7 @@
 
 ; Check generated fused MAC and MLS.
 
-define double @fusedMACTest1(double %d1, double %d2, double %d3) {
+define arm_aapcs_vfpcc double @fusedMACTest1(double %d1, double %d2, double %d3) {
 ;CHECK-LABEL: fusedMACTest1:
 ;CHECK: vfma.f64
   %1 = fmul double %d1, %d2
@@ -13,7 +13,7 @@ define double @fusedMACTest1(double %d1, double %d2, double %d3) {
   ret double %2
 }
 
-define float @fusedMACTest2(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @fusedMACTest2(float %f1, float %f2, float %f3) {
 ;CHECK-LABEL: fusedMACTest2:
 ;CHECK: vfma.f32
 
@@ -26,7 +26,7 @@ define float @fusedMACTest2(float %f1, float %f2, float %f3) {
   ret float %2
 }
 
-define double @fusedMACTest3(double %d1, double %d2, double %d3) {
+define arm_aapcs_vfpcc double @fusedMACTest3(double %d1, double %d2, double %d3) {
 ;CHECK-LABEL: fusedMACTest3:
 ;CHECK: vfms.f64
   %1 = fmul double %d2, %d3
@@ -34,7 +34,7 @@ define double @fusedMACTest3(double %d1, double %d2, double %d3) {
   ret double %2
 }
 
-define float @fusedMACTest4(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @fusedMACTest4(float %f1, float %f2, float %f3) {
 ;CHECK-LABEL: fusedMACTest4:
 ;CHECK: vfms.f32
   %1 = fmul float %f2, %f3
@@ -42,7 +42,7 @@ define float @fusedMACTest4(float %f1, float %f2, float %f3) {
   ret float %2
 }
 
-define double @fusedMACTest5(double %d1, double %d2, double %d3) {
+define arm_aapcs_vfpcc double @fusedMACTest5(double %d1, double %d2, double %d3) {
 ;CHECK-LABEL: fusedMACTest5:
 ;CHECK: vfnma.f64
   %1 = fmul double %d1, %d2
@@ -51,7 +51,7 @@ define double @fusedMACTest5(double %d1, double %d2, double %d3) {
   ret double %3
 }
 
-define float @fusedMACTest6(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @fusedMACTest6(float %f1, float %f2, float %f3) {
 ;CHECK-LABEL: fusedMACTest6:
 ;CHECK: vfnma.f32
   %1 = fmul float %f1, %f2
@@ -60,7 +60,7 @@ define float @fusedMACTest6(float %f1, float %f2, float %f3) {
   ret float %3
 }
 
-define double @fusedMACTest7(double %d1, double %d2, double %d3) {
+define arm_aapcs_vfpcc double @fusedMACTest7(double %d1, double %d2, double %d3) {
 ;CHECK-LABEL: fusedMACTest7:
 ;CHECK: vfnms.f64
   %1 = fmul double %d1, %d2
@@ -68,7 +68,7 @@ define double @fusedMACTest7(double %d1, double %d2, double %d3) {
   ret double %2
 }
 
-define float @fusedMACTest8(float %f1, float %f2, float %f3) {
+define arm_aapcs_vfpcc float @fusedMACTest8(float %f1, float %f2, float %f3) {
 ;CHECK-LABEL: fusedMACTest8:
 ;CHECK: vfnms.f32
   %1 = fmul float %f1, %f2
@@ -76,7 +76,7 @@ define float @fusedMACTest8(float %f1, float %f2, float %f3) {
   ret float %2
 }
 
-define <2 x float> @fusedMACTest9(<2 x float> %a, <2 x float> %b) {
+define arm_aapcs_vfpcc <2 x float> @fusedMACTest9(<2 x float> %a, <2 x float> %b) {
 ;CHECK-LABEL: fusedMACTest9:
 ;CHECK: vfma.f32
   %mul = fmul <2 x float> %a, %b
@@ -84,7 +84,7 @@ define <2 x float> @fusedMACTest9(<2 x float> %a, <2 x float> %b) {
   ret <2 x float> %add
 }
 
-define <2 x float> @fusedMACTest10(<2 x float> %a, <2 x float> %b) {
+define arm_aapcs_vfpcc <2 x float> @fusedMACTest10(<2 x float> %a, <2 x float> %b) {
 ;CHECK-LABEL: fusedMACTest10:
 ;CHECK: vfms.f32
   %mul = fmul <2 x float> %a, %b
@@ -92,7 +92,7 @@ define <2 x float> @fusedMACTest10(<2 x float> %a, <2 x float> %b) {
   ret <2 x float> %sub
 }
 
-define <4 x float> @fusedMACTest11(<4 x float> %a, <4 x float> %b) {
+define arm_aapcs_vfpcc <4 x float> @fusedMACTest11(<4 x float> %a, <4 x float> %b) {
 ;CHECK-LABEL: fusedMACTest11:
 ;CHECK: vfma.f32
   %mul = fmul <4 x float> %a, %b
@@ -100,7 +100,7 @@ define <4 x float> @fusedMACTest11(<4 x float> %a, <4 x float> %b) {
   ret <4 x float> %add
 }
 
-define <4 x float> @fusedMACTest12(<4 x float> %a, <4 x float> %b) {
+define arm_aapcs_vfpcc <4 x float> @fusedMACTest12(<4 x float> %a, <4 x float> %b) {
 ;CHECK-LABEL: fusedMACTest12:
 ;CHECK: vfms.f32
   %mul = fmul <4 x float> %a, %b
@@ -108,7 +108,7 @@ define <4 x float> @fusedMACTest12(<4 x float> %a, <4 x float> %b) {
   ret <4 x float> %sub
 }
 
-define float @test_fma_f32(float %a, float %b, float %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc float @test_fma_f32(float %a, float %b, float %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fma_f32
 ; CHECK: vfma.f32
@@ -116,7 +116,7 @@ entry:
   ret float %tmp1
 }
 
-define double @test_fma_f64(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fma_f64(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fma_f64
 ; CHECK: vfma.f64
@@ -124,7 +124,7 @@ entry:
   ret double %tmp1
 }
 
-define <2 x float> @test_fma_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc <2 x float> @test_fma_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fma_v2f32
 ; CHECK: vfma.f32
@@ -132,7 +132,7 @@ entry:
   ret <2 x float> %tmp1
 }
 
-define double @test_fms_f64(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fms_f64(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fms_f64
 ; CHECK: vfms.f64
@@ -141,7 +141,7 @@ entry:
   ret double %tmp2
 }
 
-define double @test_fms_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fms_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fms_f64_2
 ; CHECK: vfms.f64
@@ -150,7 +150,7 @@ entry:
   ret double %tmp2
 }
 
-define float @test_fnms_f32(float %a, float %b, float* %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc float @test_fnms_f32(float %a, float %b, float* %c) nounwind readnone ssp {
 ; CHECK: test_fnms_f32
 ; CHECK: vfnms.f32
   %tmp1 = load float, float* %c, align 4
@@ -159,7 +159,7 @@ define float @test_fnms_f32(float %a, float %b, float* %c) nounwind readnone ssp
   ret float %tmp3 
 }
 
-define double @test_fnms_f64(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fnms_f64(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fnms_f64
 ; CHECK: vfnms.f64
@@ -169,7 +169,7 @@ entry:
   ret double %tmp3
 }
 
-define double @test_fnms_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fnms_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fnms_f64_2
 ; CHECK: vfnms.f64
@@ -179,7 +179,7 @@ entry:
   ret double %tmp3
 }
 
-define double @test_fnma_f64(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fnma_f64(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fnma_f64
 ; CHECK: vfnma.f64
@@ -188,7 +188,7 @@ entry:
   ret double %tmp2
 }
 
-define double @test_fnma_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
+define arm_aapcs_vfpcc double @test_fnma_f64_2(double %a, double %b, double %c) nounwind readnone ssp {
 entry:
 ; CHECK: test_fnma_f64_2
 ; CHECK: vfnma.f64
@@ -198,7 +198,7 @@ entry:
   ret double %tmp3
 }
 
-define float @test_fma_const_fold(float %a, float %b) nounwind {
+define arm_aapcs_vfpcc float @test_fma_const_fold(float %a, float %b) nounwind {
 ; CHECK: test_fma_const_fold
 ; CHECK-NOT: vfma
 ; CHECK-NOT: vmul
@@ -207,7 +207,7 @@ define float @test_fma_const_fold(float %a, float %b) nounwind {
   ret float %ret
 }
 
-define float @test_fma_canonicalize(float %a, float %b) nounwind {
+define arm_aapcs_vfpcc float @test_fma_canonicalize(float %a, float %b) nounwind {
 ; CHECK: test_fma_canonicalize
 ; CHECK: vmov.f32 [[R1:s[0-9]+]], #2.000000e+00
 ; CHECK: vfma.f32 {{s[0-9]+}}, {{s[0-9]+}}, [[R1]]
@@ -216,7 +216,7 @@ define float @test_fma_canonicalize(float %a, float %b) nounwind {
 }
 
 ; Check that very wide vector fma's can be split into legal fma's.
-define void @test_fma_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float>* %p) nounwind readnone ssp {
+define arm_aapcs_vfpcc void @test_fma_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float>* %p) nounwind readnone ssp {
 ; CHECK: test_fma_v8f32
 ; CHECK: vfma.f32
 ; CHECK: vfma.f32


        

