[llvm] 92f1446 - [X86] Update strict fp scalar tests and add fp80 tests for D68857, NFC.

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Nov 25 21:44:46 PST 2019


Author: Wang, Pengfei
Date: 2019-11-26T13:44:27+08:00
New Revision: 92f1446b8b8a1031d1676df5f90d5b5ca69e425b

URL: https://github.com/llvm/llvm-project/commit/92f1446b8b8a1031d1676df5f90d5b5ca69e425b
DIFF: https://github.com/llvm/llvm-project/commit/92f1446b8b8a1031d1676df5f90d5b5ca69e425b.diff

LOG: [X86] Update strict fp scalar tests and add fp80 tests for D68857, NFC.
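
For context: both test files exercise the llvm.experimental.constrained.*
intrinsics inside functions carrying the strictfp attribute, which forbid
the backend from assuming the default rounding mode or from discarding FP
exceptions. A minimal f32 sketch of the pattern (the function name here is
illustrative, not taken from the patch):

; Strict-FP scalar add: the metadata operands pin down the rounding mode
; ("round.dynamic") and the exception semantics ("fpexcept.strict") that
; codegen must preserve.
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)

define float @fadd_f32_sketch(float %a, float %b) nounwind strictfp {
  %ret = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
                                                            metadata !"round.dynamic",
                                                            metadata !"fpexcept.strict") #0
  ret float %ret
}

attributes #0 = { strictfp }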

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fp-strict-scalar.ll
    llvm/test/CodeGen/X86/fp80-strict-scalar.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar.ll b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
index 724095e8aca3..8813047636ed 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
@@ -70,8 +70,8 @@ define double @fadd_f64(double %a, double %b) nounwind strictfp {
   ret double %ret
 }
 
-define float @fadd_fsub_f32(float %a, float %b) nounwind strictfp {
-; SSE-X86-LABEL: fadd_fsub_f32:
+define float @fadd_f32(float %a, float %b) nounwind strictfp {
+; SSE-X86-LABEL: fadd_f32:
 ; SSE-X86:       # %bb.0:
 ; SSE-X86-NEXT:    pushl %eax
 ; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -81,12 +81,12 @@ define float @fadd_fsub_f32(float %a, float %b) nounwind strictfp {
 ; SSE-X86-NEXT:    popl %eax
 ; SSE-X86-NEXT:    retl
 ;
-; SSE-X64-LABEL: fadd_fsub_f32:
+; SSE-X64-LABEL: fadd_f32:
 ; SSE-X64:       # %bb.0:
 ; SSE-X64-NEXT:    addss %xmm1, %xmm0
 ; SSE-X64-NEXT:    retq
 ;
-; AVX-X86-LABEL: fadd_fsub_f32:
+; AVX-X86-LABEL: fadd_f32:
 ; AVX-X86:       # %bb.0:
 ; AVX-X86-NEXT:    pushl %eax
 ; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -96,12 +96,12 @@ define float @fadd_fsub_f32(float %a, float %b) nounwind strictfp {
 ; AVX-X86-NEXT:    popl %eax
 ; AVX-X86-NEXT:    retl
 ;
-; AVX-X64-LABEL: fadd_fsub_f32:
+; AVX-X64-LABEL: fadd_f32:
 ; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
 ;
-; X87-LABEL: fadd_fsub_f32:
+; X87-LABEL: fadd_f32:
 ; X87:       # %bb.0:
 ; X87-NEXT:    flds {{[0-9]+}}(%esp)
 ; X87-NEXT:    fadds {{[0-9]+}}(%esp)

diff --git a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
index 1fc5d0196190..279500863220 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X64
 
 declare x86_fp80 @llvm.experimental.constrained.fadd.x86_fp80(x86_fp80, x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.fsub.x86_fp80(x86_fp80, x86_fp80, metadata, metadata)
@@ -92,129 +92,107 @@ define x86_fp80 @fdiv_fp80(x86_fp80 %a, x86_fp80 %b) nounwind strictfp {
   ret x86_fp80 %ret
 }
 
-define void @fpext_f32_to_fp80(float* %val, x86_fp80* %ret) nounwind strictfp {
+define x86_fp80 @fpext_f32_to_fp80(float %a) nounwind strictfp {
 ; X86-LABEL: fpext_f32_to_fp80:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    flds (%ecx)
-; X86-NEXT:    fstpt (%eax)
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fpext_f32_to_fp80:
 ; X64:       # %bb.0:
-; X64-NEXT:    flds (%rdi)
-; X64-NEXT:    fstpt (%rsi)
+; X64-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    flds -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
-  %1 = load float, float* %val, align 4
-  %res = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f32(float %1,
+  %ret = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f32(float %a,
                                                                          metadata !"fpexcept.strict") #0
-  store x86_fp80 %res, x86_fp80* %ret, align 16
-  ret void
+  ret x86_fp80 %ret
+
 }
 
-define void @fpext_f64_to_fp80(double* %val, x86_fp80* %ret) nounwind strictfp {
+define x86_fp80 @fpext_f64_to_fp80(double %a) nounwind strictfp {
 ; X86-LABEL: fpext_f64_to_fp80:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    fldl (%ecx)
-; X86-NEXT:    fstpt (%eax)
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fpext_f64_to_fp80:
 ; X64:       # %bb.0:
-; X64-NEXT:    fldl (%rdi)
-; X64-NEXT:    fstpt (%rsi)
+; X64-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    fldl -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
-  %1 = load double, double* %val, align 8
-  %res = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f64(double %1,
+  %ret = call x86_fp80 @llvm.experimental.constrained.fpext.x86_fp80.f64(double %a,
                                                                          metadata !"fpexcept.strict") #0
-  store x86_fp80 %res, x86_fp80* %ret, align 16
-  ret void
+  ret x86_fp80 %ret
+
 }
 
-define void @fptrunc_fp80_to_f32(x86_fp80* %val, float *%ret) nounwind strictfp {
+define float @fptrunc_fp80_to_f32(x86_fp80 %a) nounwind strictfp {
 ; X86-LABEL: fptrunc_fp80_to_f32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    fldt (%ecx)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
 ; X86-NEXT:    fstps (%esp)
 ; X86-NEXT:    flds (%esp)
-; X86-NEXT:    fstps (%eax)
 ; X86-NEXT:    popl %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fptrunc_fp80_to_f32:
 ; X64:       # %bb.0:
-; X64-NEXT:    fldt (%rdi)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; X64-NEXT:    fstps -{{[0-9]+}}(%rsp)
-; X64-NEXT:    flds -{{[0-9]+}}(%rsp)
-; X64-NEXT:    fstps (%rsi)
+; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-NEXT:    retq
-  %1 = load x86_fp80, x86_fp80* %val, align 16
-  %res = call float @llvm.experimental.constrained.fptrunc.x86_fp80.f32(x86_fp80 %1,
+  %ret = call float @llvm.experimental.constrained.fptrunc.x86_fp80.f32(x86_fp80 %a,
                                                                         metadata !"round.dynamic",
                                                                         metadata !"fpexcept.strict") #0
-  store float %res, float* %ret, align 4
-  ret void
+  ret float %ret
+
 }
 
-define void @fptrunc_fp80_to_f64(x86_fp80* %val, double* %ret) nounwind strictfp {
+define double @fptrunc_fp80_to_f64(x86_fp80 %a) nounwind strictfp {
 ; X86-LABEL: fptrunc_fp80_to_f64:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    movl %esp, %ebp
 ; X86-NEXT:    andl $-8, %esp
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    movl 12(%ebp), %eax
-; X86-NEXT:    movl 8(%ebp), %ecx
-; X86-NEXT:    fldt (%ecx)
+; X86-NEXT:    fldt 8(%ebp)
 ; X86-NEXT:    fstpl (%esp)
 ; X86-NEXT:    fldl (%esp)
-; X86-NEXT:    fstpl (%eax)
 ; X86-NEXT:    movl %ebp, %esp
 ; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fptrunc_fp80_to_f64:
 ; X64:       # %bb.0:
-; X64-NEXT:    fldt (%rdi)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; X64-NEXT:    fstpl -{{[0-9]+}}(%rsp)
-; X64-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; X64-NEXT:    fstpl (%rsi)
+; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X64-NEXT:    retq
-  %1 = load x86_fp80, x86_fp80* %val, align 16
-  %res = call double @llvm.experimental.constrained.fptrunc.x86_fp80.f64(x86_fp80 %1,
+  %ret = call double @llvm.experimental.constrained.fptrunc.x86_fp80.f64(x86_fp80 %a,
                                                                          metadata !"round.dynamic",
                                                                          metadata !"fpexcept.strict") #0
-  store double %res, double* %ret, align 8
-  ret void
+  ret double %ret
+
 }
 
-define void @fsqrt_fp80(x86_fp80* %a) nounwind strictfp {
+define x86_fp80 @fsqrt_fp80(x86_fp80 %a) nounwind strictfp {
 ; X86-LABEL: fsqrt_fp80:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    fldt (%eax)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
 ; X86-NEXT:    fsqrt
-; X86-NEXT:    fstpt (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: fsqrt_fp80:
 ; X64:       # %bb.0:
-; X64-NEXT:    fldt (%rdi)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
 ; X64-NEXT:    fsqrt
-; X64-NEXT:    fstpt (%rdi)
 ; X64-NEXT:    retq
-  %1 = load x86_fp80, x86_fp80* %a, align 16
-  %res = call x86_fp80 @llvm.experimental.constrained.sqrt.x86_fp80(x86_fp80 %1,
+  %ret = call x86_fp80 @llvm.experimental.constrained.sqrt.x86_fp80(x86_fp80 %a,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
-  store x86_fp80 %res, x86_fp80* %a, align 16
-  ret void
+  ret x86_fp80 %ret
+
 }
 
 attributes #0 = { strictfp }

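For anyone reproducing these checks: the RUN lines at the top of each test
drive llc and pipe its output through FileCheck, and the assertions are
autogenerated by utils/update_llc_test_checks.py rather than hand-written.
A typical invocation from an llvm-project checkout (assuming the llc and
FileCheck binaries from your build are on PATH) looks like:

llc < llvm/test/CodeGen/X86/fp80-strict-scalar.ll -mtriple=x86_64-unknown-unknown -O3 \
  | FileCheck llvm/test/CodeGen/X86/fp80-strict-scalar.ll --check-prefixes=CHECK,X64

# After changing the IR, regenerate the CHECK lines in place:
llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/fp80-strict-scalar.ll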