[llvm] r326930 - [X86][SSE] Regenerate float maxnum/minnum tests

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Wed Mar 7 11:14:05 PST 2018
Author: rksimon
Date: Wed Mar  7 11:14:05 2018
New Revision: 326930

URL: http://llvm.org/viewvc/llvm-project?rev=326930&view=rev
Log:
[X86][SSE] Regenerate float maxnum/minnum tests

Modified:
    llvm/trunk/test/CodeGen/X86/fmaxnum.ll
    llvm/trunk/test/CodeGen/X86/fminnum.ll
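
For reference, the CHECK lines in the diff below were generated by the
script named in the NOTE line rather than written by hand. A minimal
sketch of the regeneration command, assuming an in-tree build with llc
in build/bin (the --llc-binary flag name matches the script's usual
interface but is worth verifying against your checkout):

  # Re-runs llc per the RUN lines at the top of each test and rewrites
  # the CHECK/SSE/AVX assertions in place.
  python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      test/CodeGen/X86/fmaxnum.ll test/CodeGen/X86/fminnum.ll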

Modified: llvm/trunk/test/CodeGen/X86/fmaxnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaxnum.ll?rev=326930&r1=326929&r2=326930&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaxnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaxnum.ll Wed Mar  7 11:14:05 2018
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2  < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx  < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
 
@@ -16,8 +17,10 @@ declare <8 x double> @llvm.maxnum.v8f64(
 
 ; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
-; CHECK-LABEL: @test_fmaxf
-; SSE:         movaps %xmm0, %xmm2
+define float @test_fmaxf(float %x, float %y) {
+; SSE-LABEL: test_fmaxf:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    andps %xmm1, %xmm3
@@ -27,26 +30,30 @@ declare <8 x double> @llvm.maxnum.v8f64(
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_fmaxf:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define float @test_fmaxf(float %x, float %y) {
   %z = call float @fmaxf(float %x, float %y) readnone
   ret float %z
 }
 
-; CHECK-LABEL: @test_fmaxf_minsize
-; CHECK:       jmp fmaxf
 define float @test_fmaxf_minsize(float %x, float %y) minsize {
+; CHECK-LABEL: test_fmaxf_minsize:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    jmp fmaxf@PLT # TAILCALL
   %z = call float @fmaxf(float %x, float %y) readnone
   ret float %z
 }
 
 ; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
-; CHECK-LABEL: @test_fmax
-; SSE:         movapd %xmm0, %xmm2
+define double @test_fmax(double %x, double %y) {
+; SSE-LABEL: test_fmax:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm3
 ; SSE-NEXT:    andpd %xmm1, %xmm3
@@ -56,24 +63,36 @@ define float @test_fmaxf_minsize(float %
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_fmax:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define double @test_fmax(double %x, double %y) {
   %z = call double @fmax(double %x, double %y) readnone
   ret double %z
 }
 
-; CHECK-LABEL: @test_fmaxl
-; CHECK: callq fmaxl
 define x86_fp80 @test_fmaxl(x86_fp80 %x, x86_fp80 %y) {
+; CHECK-LABEL: test_fmaxl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt (%rsp)
+; CHECK-NEXT:    callq fmaxl@PLT
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %z = call x86_fp80 @fmaxl(x86_fp80 %x, x86_fp80 %y) readnone
   ret x86_fp80 %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmaxf
-; SSE:         movaps %xmm0, %xmm2
+define float @test_intrinsic_fmaxf(float %x, float %y) {
+; SSE-LABEL: test_intrinsic_fmaxf:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    andps %xmm1, %xmm3
@@ -83,18 +102,20 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x,
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmaxf:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define float @test_intrinsic_fmaxf(float %x, float %y) {
   %z = call float @llvm.maxnum.f32(float %x, float %y) readnone
   ret float %z
 }
 
-
-; CHECK-LABEL: @test_intrinsic_fmax
-; SSE:         movapd %xmm0, %xmm2
+define double @test_intrinsic_fmax(double %x, double %y) {
+; SSE-LABEL: test_intrinsic_fmax:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm3
 ; SSE-NEXT:    andpd %xmm1, %xmm3
@@ -104,24 +125,36 @@ define float @test_intrinsic_fmaxf(float
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmax:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define double @test_intrinsic_fmax(double %x, double %y) {
   %z = call double @llvm.maxnum.f64(double %x, double %y) readnone
   ret double %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmaxl
-; CHECK: callq fmaxl
 define x86_fp80 @test_intrinsic_fmaxl(x86_fp80 %x, x86_fp80 %y) {
+; CHECK-LABEL: test_intrinsic_fmaxl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt (%rsp)
+; CHECK-NEXT:    callq fmaxl@PLT
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %z = call x86_fp80 @llvm.maxnum.f80(x86_fp80 %x, x86_fp80 %y) readnone
   ret x86_fp80 %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmax_v2f32
-; SSE:         movaps %xmm1, %xmm2
+define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
+; SSE-LABEL: test_intrinsic_fmax_v2f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    maxps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordps %xmm0, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm1
@@ -129,17 +162,20 @@ define x86_fp80 @test_intrinsic_fmaxl(x8
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxps %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmax_v2f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
   %z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
   ret <2 x float> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmax_v4f32
-; SSE:         movaps %xmm1, %xmm2
+define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: test_intrinsic_fmax_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    maxps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordps %xmm0, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm1
@@ -147,17 +183,20 @@ define <2 x float> @test_intrinsic_fmax_
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxps %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmax_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
   %z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
   ret <4 x float> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmax_v2f64
-; SSE:         movapd %xmm1, %xmm2
+define <2 x double> @test_intrinsic_fmax_v2f64(<2 x double> %x, <2 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmax_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm1, %xmm2
 ; SSE-NEXT:    maxpd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm1
@@ -165,74 +204,81 @@ define <4 x float> @test_intrinsic_fmax_
 ; SSE-NEXT:    orpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmax_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxpd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <2 x double> @test_intrinsic_fmax_v2f64(<2 x double> %x, <2 x double> %y) {
   %z = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
   ret <2 x double> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmax_v4f64
-; SSE:         movapd  %xmm2, %xmm4
+define <4 x double> @test_intrinsic_fmax_v4f64(<4 x double> %x, <4 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmax_v4f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm2, %xmm4
 ; SSE-NEXT:    maxpd %xmm0, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
-; SSE-NEXT:    andnpd  %xmm4, %xmm0
-; SSE-NEXT:    orpd  %xmm2, %xmm0
-; SSE-NEXT:    movapd  %xmm3, %xmm2
+; SSE-NEXT:    andnpd %xmm4, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
+; SSE-NEXT:    movapd %xmm3, %xmm2
 ; SSE-NEXT:    maxpd %xmm1, %xmm2
-; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm3
-; SSE-NEXT:    andnpd  %xmm2, %xmm1
-; SSE-NEXT:    orpd  %xmm3, %xmm1
+; SSE-NEXT:    andnpd %xmm2, %xmm1
+; SSE-NEXT:    orpd %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxpd  %ymm0, %ymm1, %ymm2
+; AVX-LABEL: test_intrinsic_fmax_v4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxpd %ymm0, %ymm1, %ymm2
 ; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
 ; AVX-NEXT:    retq
-define <4 x double> @test_intrinsic_fmax_v4f64(<4 x double> %x, <4 x double> %y) {
   %z = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
   ret <4 x double> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmax_v8f64
-; SSE:         movapd  %xmm4, %xmm8
+define <8 x double> @test_intrinsic_fmax_v8f64(<8 x double> %x, <8 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmax_v8f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm4, %xmm8
 ; SSE-NEXT:    maxpd %xmm0, %xmm8
-; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm4
-; SSE-NEXT:    andnpd  %xmm8, %xmm0
-; SSE-NEXT:    orpd  %xmm4, %xmm0
-; SSE-NEXT:    movapd  %xmm5, %xmm4
+; SSE-NEXT:    andnpd %xmm8, %xmm0
+; SSE-NEXT:    orpd %xmm4, %xmm0
+; SSE-NEXT:    movapd %xmm5, %xmm4
 ; SSE-NEXT:    maxpd %xmm1, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm5
-; SSE-NEXT:    andnpd  %xmm4, %xmm1
-; SSE-NEXT:    orpd  %xmm5, %xmm1
-; SSE-NEXT:    movapd  %xmm6, %xmm4
+; SSE-NEXT:    andnpd %xmm4, %xmm1
+; SSE-NEXT:    orpd %xmm5, %xmm1
+; SSE-NEXT:    movapd %xmm6, %xmm4
 ; SSE-NEXT:    maxpd %xmm2, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm2, %xmm2
+; SSE-NEXT:    cmpunordpd %xmm2, %xmm2
 ; SSE-NEXT:    andpd %xmm2, %xmm6
-; SSE-NEXT:    andnpd  %xmm4, %xmm2
-; SSE-NEXT:    orpd  %xmm6, %xmm2
-; SSE-NEXT:    movapd  %xmm7, %xmm4
+; SSE-NEXT:    andnpd %xmm4, %xmm2
+; SSE-NEXT:    orpd %xmm6, %xmm2
+; SSE-NEXT:    movapd %xmm7, %xmm4
 ; SSE-NEXT:    maxpd %xmm3, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm3, %xmm3
+; SSE-NEXT:    cmpunordpd %xmm3, %xmm3
 ; SSE-NEXT:    andpd %xmm3, %xmm7
-; SSE-NEXT:    andnpd  %xmm4, %xmm3
-; SSE-NEXT:    orpd  %xmm7, %xmm3
+; SSE-NEXT:    andnpd %xmm4, %xmm3
+; SSE-NEXT:    orpd %xmm7, %xmm3
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmaxpd  %ymm0, %ymm2, %ymm4
+; AVX-LABEL: test_intrinsic_fmax_v8f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxpd %ymm0, %ymm2, %ymm4
 ; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
-; AVX-NEXT:    vmaxpd  %ymm1, %ymm3, %ymm2
+; AVX-NEXT:    vmaxpd %ymm1, %ymm3, %ymm2
 ; AVX-NEXT:    vcmpunordpd %ymm1, %ymm1, %ymm1
 ; AVX-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    retq
-define <8 x double> @test_intrinsic_fmax_v8f64(<8 x double> %x, <8 x double> %y) {
   %z = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
   ret <8 x double> %z
 }

Modified: llvm/trunk/test/CodeGen/X86/fminnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fminnum.ll?rev=326930&r1=326929&r2=326930&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fminnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fminnum.ll Wed Mar  7 11:14:05 2018
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2  < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx  < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
 
@@ -16,8 +17,10 @@ declare <8 x double> @llvm.minnum.v8f64(
 
 ; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
-; CHECK-LABEL: @test_fminf
-; SSE:         movaps %xmm0, %xmm2
+define float @test_fminf(float %x, float %y) {
+; SSE-LABEL: test_fminf:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    andps %xmm1, %xmm3
@@ -27,19 +30,22 @@ declare <8 x double> @llvm.minnum.v8f64(
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminss %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_fminf:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define float @test_fminf(float %x, float %y) {
   %z = call float @fminf(float %x, float %y) readnone
   ret float %z
 }
 
 ; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
-; CHECK-LABEL: @test_fmin
-; SSE:         movapd %xmm0, %xmm2
+define double @test_fmin(double %x, double %y) {
+; SSE-LABEL: test_fmin:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm3
 ; SSE-NEXT:    andpd %xmm1, %xmm3
@@ -49,24 +55,36 @@ define float @test_fminf(float %x, float
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminsd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_fmin:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define double @test_fmin(double %x, double %y) {
   %z = call double @fmin(double %x, double %y) readnone
   ret double %z
 }
 
-; CHECK-LABEL: @test_fminl
-; CHECK: callq fminl
 define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
+; CHECK-LABEL: test_fminl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt (%rsp)
+; CHECK-NEXT:    callq fminl@PLT
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %z = call x86_fp80 @fminl(x86_fp80 %x, x86_fp80 %y) readnone
   ret x86_fp80 %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fminf
-; SSE:         movaps %xmm0, %xmm2
+define float @test_intrinsic_fminf(float %x, float %y) {
+; SSE-LABEL: test_intrinsic_fminf:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordss %xmm0, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm3
 ; SSE-NEXT:    andps %xmm1, %xmm3
@@ -76,17 +94,20 @@ define x86_fp80 @test_fminl(x86_fp80 %x,
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminss %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fminf:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define float @test_intrinsic_fminf(float %x, float %y) {
   %z = call float @llvm.minnum.f32(float %x, float %y) readnone
   ret float %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin
-; SSE:         movapd %xmm0, %xmm2
+define double @test_intrinsic_fmin(double %x, double %y) {
+; SSE-LABEL: test_intrinsic_fmin:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
 ; SSE-NEXT:    movapd %xmm2, %xmm3
 ; SSE-NEXT:    andpd %xmm1, %xmm3
@@ -96,24 +117,36 @@ define float @test_intrinsic_fminf(float
 ; SSE-NEXT:    movapd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminsd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmin:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define double @test_intrinsic_fmin(double %x, double %y) {
   %z = call double @llvm.minnum.f64(double %x, double %y) readnone
   ret double %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fminl
-; CHECK: callq fminl
 define x86_fp80 @test_intrinsic_fminl(x86_fp80 %x, x86_fp80 %y) {
+; CHECK-LABEL: test_intrinsic_fminl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fstpt (%rsp)
+; CHECK-NEXT:    callq fminl@PLT
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %z = call x86_fp80 @llvm.minnum.f80(x86_fp80 %x, x86_fp80 %y) readnone
   ret x86_fp80 %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin_v2f32
-; SSE:         movaps %xmm1, %xmm2
+define <2 x float> @test_intrinsic_fmin_v2f32(<2 x float> %x, <2 x float> %y) {
+; SSE-LABEL: test_intrinsic_fmin_v2f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    minps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordps %xmm0, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm1
@@ -121,17 +154,20 @@ define x86_fp80 @test_intrinsic_fminl(x8
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminps %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmin_v2f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminps %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <2 x float> @test_intrinsic_fmin_v2f32(<2 x float> %x, <2 x float> %y) {
   %z = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
   ret <2 x float> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin_v4f32
-; SSE:         movaps %xmm1, %xmm2
+define <4 x float> @test_intrinsic_fmin_v4f32(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: test_intrinsic_fmin_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
 ; SSE-NEXT:    minps %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordps %xmm0, %xmm0
 ; SSE-NEXT:    andps %xmm0, %xmm1
@@ -139,17 +175,20 @@ define <2 x float> @test_intrinsic_fmin_
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminps %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmin_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminps %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <4 x float> @test_intrinsic_fmin_v4f32(<4 x float> %x, <4 x float> %y) {
   %z = call <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
   ret <4 x float> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin_v2f64
-; SSE:         movapd %xmm1, %xmm2
+define <2 x double> @test_intrinsic_fmin_v2f64(<2 x double> %x, <2 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmin_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm1, %xmm2
 ; SSE-NEXT:    minpd %xmm0, %xmm2
 ; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm1
@@ -157,74 +196,81 @@ define <4 x float> @test_intrinsic_fmin_
 ; SSE-NEXT:    orpd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminpd %xmm0, %xmm1, %xmm2
+; AVX-LABEL: test_intrinsic_fmin_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminpd %xmm0, %xmm1, %xmm2
 ; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
-define <2 x double> @test_intrinsic_fmin_v2f64(<2 x double> %x, <2 x double> %y) {
   %z = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
   ret <2 x double> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin_v4f64
-; SSE:         movapd  %xmm2, %xmm4
+define <4 x double> @test_intrinsic_fmin_v4f64(<4 x double> %x, <4 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmin_v4f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm2, %xmm4
 ; SSE-NEXT:    minpd %xmm0, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm2
-; SSE-NEXT:    andnpd  %xmm4, %xmm0
-; SSE-NEXT:    orpd  %xmm2, %xmm0
-; SSE-NEXT:    movapd  %xmm3, %xmm2
+; SSE-NEXT:    andnpd %xmm4, %xmm0
+; SSE-NEXT:    orpd %xmm2, %xmm0
+; SSE-NEXT:    movapd %xmm3, %xmm2
 ; SSE-NEXT:    minpd %xmm1, %xmm2
-; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm3
-; SSE-NEXT:    andnpd  %xmm2, %xmm1
-; SSE-NEXT:    orpd  %xmm3, %xmm1
+; SSE-NEXT:    andnpd %xmm2, %xmm1
+; SSE-NEXT:    orpd %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminpd  %ymm0, %ymm1, %ymm2
+; AVX-LABEL: test_intrinsic_fmin_v4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminpd %ymm0, %ymm1, %ymm2
 ; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
 ; AVX-NEXT:    retq
-define <4 x double> @test_intrinsic_fmin_v4f64(<4 x double> %x, <4 x double> %y) {
   %z = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
   ret <4 x double> %z
 }
 
-; CHECK-LABEL: @test_intrinsic_fmin_v8f64
-; SSE:         movapd  %xmm4, %xmm8
+define <8 x double> @test_intrinsic_fmin_v8f64(<8 x double> %x, <8 x double> %y) {
+; SSE-LABEL: test_intrinsic_fmin_v8f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm4, %xmm8
 ; SSE-NEXT:    minpd %xmm0, %xmm8
-; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
 ; SSE-NEXT:    andpd %xmm0, %xmm4
-; SSE-NEXT:    andnpd  %xmm8, %xmm0
-; SSE-NEXT:    orpd  %xmm4, %xmm0
-; SSE-NEXT:    movapd  %xmm5, %xmm4
+; SSE-NEXT:    andnpd %xmm8, %xmm0
+; SSE-NEXT:    orpd %xmm4, %xmm0
+; SSE-NEXT:    movapd %xmm5, %xmm4
 ; SSE-NEXT:    minpd %xmm1, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
 ; SSE-NEXT:    andpd %xmm1, %xmm5
-; SSE-NEXT:    andnpd  %xmm4, %xmm1
-; SSE-NEXT:    orpd  %xmm5, %xmm1
-; SSE-NEXT:    movapd  %xmm6, %xmm4
+; SSE-NEXT:    andnpd %xmm4, %xmm1
+; SSE-NEXT:    orpd %xmm5, %xmm1
+; SSE-NEXT:    movapd %xmm6, %xmm4
 ; SSE-NEXT:    minpd %xmm2, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm2, %xmm2
+; SSE-NEXT:    cmpunordpd %xmm2, %xmm2
 ; SSE-NEXT:    andpd %xmm2, %xmm6
-; SSE-NEXT:    andnpd  %xmm4, %xmm2
-; SSE-NEXT:    orpd  %xmm6, %xmm2
-; SSE-NEXT:    movapd  %xmm7, %xmm4
+; SSE-NEXT:    andnpd %xmm4, %xmm2
+; SSE-NEXT:    orpd %xmm6, %xmm2
+; SSE-NEXT:    movapd %xmm7, %xmm4
 ; SSE-NEXT:    minpd %xmm3, %xmm4
-; SSE-NEXT:    cmpunordpd  %xmm3, %xmm3
+; SSE-NEXT:    cmpunordpd %xmm3, %xmm3
 ; SSE-NEXT:    andpd %xmm3, %xmm7
-; SSE-NEXT:    andnpd  %xmm4, %xmm3
-; SSE-NEXT:    orpd  %xmm7, %xmm3
+; SSE-NEXT:    andnpd %xmm4, %xmm3
+; SSE-NEXT:    orpd %xmm7, %xmm3
 ; SSE-NEXT:    retq
 ;
-; AVX:         vminpd  %ymm0, %ymm2, %ymm4
+; AVX-LABEL: test_intrinsic_fmin_v8f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminpd %ymm0, %ymm2, %ymm4
 ; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
 ; AVX-NEXT:    vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
-; AVX-NEXT:    vminpd  %ymm1, %ymm3, %ymm2
+; AVX-NEXT:    vminpd %ymm1, %ymm3, %ymm2
 ; AVX-NEXT:    vcmpunordpd %ymm1, %ymm1, %ymm1
 ; AVX-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    retq
-define <8 x double> @test_intrinsic_fmin_v8f64(<8 x double> %x, <8 x double> %y) {
   %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
   ret <8 x double> %z
 }
