[llvm] 289b8e0 - [X86] Add fp80 copysign test coverage

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 7 01:44:27 PST 2022


Author: Simon Pilgrim
Date: 2022-02-07T09:44:01Z
New Revision: 289b8e0d2fc6747543ee3dce829f148900f437bc

URL: https://github.com/llvm/llvm-project/commit/289b8e0d2fc6747543ee3dce829f148900f437bc
DIFF: https://github.com/llvm/llvm-project/commit/289b8e0d2fc6747543ee3dce829f148900f437bc.diff

LOG: [X86] Add fp80 copysign test coverage

Add PR41749 test coverage

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/sse-fcopysign.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/sse-fcopysign.ll b/llvm/test/CodeGen/X86/sse-fcopysign.ll
index ebc35b2843cc0..1e19eec150916 100644
--- a/llvm/test/CodeGen/X86/sse-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/sse-fcopysign.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ;
@@ -7,16 +7,16 @@
 ;
 
 define float @tst1(float %a, float %b) nounwind {
-; X32-LABEL: tst1:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    movss %xmm1, {{[0-9]+}}(%esp)
-; X32-NEXT:    movss %xmm0, (%esp)
-; X32-NEXT:    calll copysignf
-; X32-NEXT:    addl $8, %esp
-; X32-NEXT:    retl
+; X86-LABEL: tst1:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    movss %xmm1, {{[0-9]+}}(%esp)
+; X86-NEXT:    movss %xmm0, (%esp)
+; X86-NEXT:    calll copysignf
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: tst1:
 ; X64:       # %bb.0:
@@ -29,18 +29,18 @@ define float @tst1(float %a, float %b) nounwind {
 }
 
 define double @tst2(double %a, float %b, float %c) nounwind {
-; X32-LABEL: tst2:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $16, %esp
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    addss {{[0-9]+}}(%esp), %xmm1
-; X32-NEXT:    cvtss2sd %xmm1, %xmm1
-; X32-NEXT:    movsd %xmm0, (%esp)
-; X32-NEXT:    movsd %xmm1, {{[0-9]+}}(%esp)
-; X32-NEXT:    calll copysign
-; X32-NEXT:    addl $16, %esp
-; X32-NEXT:    retl
+; X86-LABEL: tst2:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    addss {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    cvtss2sd %xmm1, %xmm1
+; X86-NEXT:    movsd %xmm0, (%esp)
+; X86-NEXT:    movsd %xmm1, {{[0-9]+}}(%esp)
+; X86-NEXT:    calll copysign
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: tst2:
 ; X64:       # %bb.0:
@@ -53,26 +53,53 @@ define double @tst2(double %a, float %b, float %c) nounwind {
   ret double %tmp
 }
 
+define x86_fp80 @tst3(x86_fp80 %a, x86_fp80 %b) nounwind {
+; X86-LABEL: tst3:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $24, %esp
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpt (%esp)
+; X86-NEXT:    calll copysignl
+; X86-NEXT:    addl $24, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: tst3:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $40, %rsp
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fstpt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fstpt (%rsp)
+; X64-NEXT:    callq copysignl
+; X64-NEXT:    addq $40, %rsp
+; X64-NEXT:    retq
+  %tmp = tail call x86_fp80 @copysignl( x86_fp80 %b, x86_fp80 %a )
+  ret x86_fp80 %tmp
+}
+
 declare dso_local float @copysignf(float, float)
 declare dso_local double @copysign(double, double)
+declare dso_local x86_fp80 @copysignl(x86_fp80, x86_fp80)
 
 ;
 ; LLVM Intrinsic
 ;
 
 define float @int1(float %a, float %b) nounwind {
-; X32-LABEL: int1:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-NEXT:    orps %xmm0, %xmm1
-; X32-NEXT:    movss %xmm1, (%esp)
-; X32-NEXT:    flds (%esp)
-; X32-NEXT:    popl %eax
-; X32-NEXT:    retl
+; X86-LABEL: int1:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    orps %xmm0, %xmm1
+; X86-NEXT:    movss %xmm1, (%esp)
+; X86-NEXT:    flds (%esp)
+; X86-NEXT:    popl %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: int1:
 ; X64:       # %bb.0:
@@ -85,24 +112,24 @@ define float @int1(float %a, float %b) nounwind {
 }
 
 define double @int2(double %a, float %b, float %c) nounwind {
-; X32-LABEL: int2:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    addss 20(%ebp), %xmm0
-; X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-NEXT:    cvtss2sd %xmm0, %xmm0
-; X32-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    orps %xmm1, %xmm0
-; X32-NEXT:    movlps %xmm0, (%esp)
-; X32-NEXT:    fldl (%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: int2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    addss 20(%ebp), %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    cvtss2sd %xmm0, %xmm0
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    orps %xmm1, %xmm0
+; X86-NEXT:    movlps %xmm0, (%esp)
+; X86-NEXT:    fldl (%esp)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: int2:
 ; X64:       # %bb.0:
@@ -118,12 +145,46 @@ define double @int2(double %a, float %b, float %c) nounwind {
   ret double %tmp
 }
 
+define x86_fp80 @int3(x86_fp80 %a, x86_fp80 %b) nounwind {
+; X86-LABEL: int3:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpt (%esp)
+; X86-NEXT:    fabs
+; X86-NEXT:    fld %st(0)
+; X86-NEXT:    fchs
+; X86-NEXT:    testb $-128, {{[0-9]+}}(%esp)
+; X86-NEXT:    fxch %st(1)
+; X86-NEXT:    fcmovne %st(1), %st
+; X86-NEXT:    fstp %st(1)
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: int3:
+; X64:       # %bb.0:
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fstpt -{{[0-9]+}}(%rsp)
+; X64-NEXT:    fabs
+; X64-NEXT:    fld %st(0)
+; X64-NEXT:    fchs
+; X64-NEXT:    testb $-128, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    fxch %st(1)
+; X64-NEXT:    fcmovne %st(1), %st
+; X64-NEXT:    fstp %st(1)
+; X64-NEXT:    retq
+  %tmp = tail call x86_fp80 @llvm.copysign.f80( x86_fp80 %b, x86_fp80 %a )
+  ret x86_fp80 %tmp
+}
+
 define float @cst1() nounwind {
-; X32-LABEL: cst1:
-; X32:       # %bb.0:
-; X32-NEXT:    fld1
-; X32-NEXT:    fchs
-; X32-NEXT:    retl
+; X86-LABEL: cst1:
+; X86:       # %bb.0:
+; X86-NEXT:    fld1
+; X86-NEXT:    fchs
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: cst1:
 ; X64:       # %bb.0:
@@ -134,11 +195,11 @@ define float @cst1() nounwind {
 }
 
 define double @cst2() nounwind {
-; X32-LABEL: cst2:
-; X32:       # %bb.0:
-; X32-NEXT:    fldz
-; X32-NEXT:    fchs
-; X32-NEXT:    retl
+; X86-LABEL: cst2:
+; X86:       # %bb.0:
+; X86-NEXT:    fldz
+; X86-NEXT:    fchs
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: cst2:
 ; X64:       # %bb.0:
@@ -150,5 +211,61 @@ define double @cst2() nounwind {
   ret double %tmp
 }
 
+define x86_fp80 @cst3() nounwind {
+; X86-LABEL: cst3:
+; X86:       # %bb.0:
+; X86-NEXT:    fldz
+; X86-NEXT:    fchs
+; X86-NEXT:    retl
+;
+; X64-LABEL: cst3:
+; X64:       # %bb.0:
+; X64-NEXT:    fldz
+; X64-NEXT:    fchs
+; X64-NEXT:    retq
+  %tmp1 = fadd float -1.0, -1.0
+  %tmp2 = fpext float %tmp1 to x86_fp80
+  %tmp = tail call x86_fp80 @llvm.copysign.f80( x86_fp80 zeroinitializer, x86_fp80 %tmp2 )
+  ret x86_fp80 %tmp
+}
+
+define void @PR41749() {
+; X86-LABEL: PR41749:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    fldz
+; X86-NEXT:    fld %st(0)
+; X86-NEXT:    fstpt (%esp)
+; X86-NEXT:    testb $-128, {{[0-9]+}}(%esp)
+; X86-NEXT:    fld %st(0)
+; X86-NEXT:    fchs
+; X86-NEXT:    fxch %st(1)
+; X86-NEXT:    fcmovne %st(1), %st
+; X86-NEXT:    fstp %st(1)
+; X86-NEXT:    fstpt (%eax)
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: PR41749:
+; X64:       # %bb.0:
+; X64-NEXT:    fldz
+; X64-NEXT:    fld %st(0)
+; X64-NEXT:    fstpt -{{[0-9]+}}(%rsp)
+; X64-NEXT:    testb $-128, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    fld %st(0)
+; X64-NEXT:    fchs
+; X64-NEXT:    fxch %st(1)
+; X64-NEXT:    fcmovne %st(1), %st
+; X64-NEXT:    fstp %st(1)
+; X64-NEXT:    fstpt (%rax)
+; X64-NEXT:    retq
+  %1 = call x86_fp80 @llvm.copysign.f80(x86_fp80 0xK00000000000000000000, x86_fp80 undef)
+  store x86_fp80 %1, x86_fp80* undef, align 16
+  ret void
+}
+
 declare dso_local float     @llvm.copysign.f32(float  %Mag, float  %Sgn)
 declare dso_local double    @llvm.copysign.f64(double %Mag, double %Sgn)
+declare dso_local x86_fp80  @llvm.copysign.f80(x86_fp80 %Mag, x86_fp80 %Sgn)


        


More information about the llvm-commits mailing list