[llvm] r331988 - [x86] add tests for maxnum/minnum intrinsics with nnan; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu May 10 07:48:42 PDT 2018


Author: spatel
Date: Thu May 10 07:48:42 2018
New Revision: 331988

URL: http://llvm.org/viewvc/llvm-project?rev=331988&view=rev
Log:
[x86] add tests for maxnum/minnum intrinsics with nnan; NFC

Clang 6.0 was updated to create these intrinsics rather than
libcalls or fcmp/select, but the backend wasn't prepared to
handle that optimally.

This missed fold is not the primary cause of PR37403:
https://bugs.llvm.org/show_bug.cgi?id=37403
...but it's probably more important for x86 perf.
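
For context, a minimal sketch of the IR difference that motivates these
tests (illustrative only, not part of this commit): older clang expanded
a call like fmax() under fast-math into an explicit compare/select, while
clang 6.0 emits the intrinsic carrying the nnan flag, which is what the
new tests below exercise:

  ; roughly the old compare/select expansion
  %cmp = fcmp ogt double %a, %b
  %r = select i1 %cmp, double %a, double %b

  ; what clang 6.0 emits instead
  %r = tail call nnan double @llvm.maxnum.f64(double %a, double %b)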

Modified:
    llvm/trunk/test/CodeGen/X86/fmaxnum.ll
    llvm/trunk/test/CodeGen/X86/fminnum.ll

Modified: llvm/trunk/test/CodeGen/X86/fmaxnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaxnum.ll?rev=331988&r1=331987&r2=331988&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaxnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaxnum.ll Thu May 10 07:48:42 2018
@@ -285,3 +285,101 @@ define <8 x double> @test_intrinsic_fmax
   ret <8 x double> %z
 }
 
+; FIXME: The IR-level FMF should propagate to the node.
+
+define double @maxnum_intrinsic_nnan_fmf_f64(double %a, double %b) {
+; SSE-LABEL: maxnum_intrinsic_nnan_fmf_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm3
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    maxsd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm1, %xmm2
+; SSE-NEXT:    orpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_fmf_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call nnan double @llvm.maxnum.f64(double %a, double %b)
+  ret double %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <4 x float> @maxnum_intrinsic_nnan_fmf_v4f32(<4 x float> %a, <4 x float> %b) {
+; SSE-LABEL: maxnum_intrinsic_nnan_fmf_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    maxps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm2, %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_fmf_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call nnan <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %r
+}
+
+; FIXME: The function-level "no-nans-fp-math" attribute (current, but someday legacy) should also enable the fold.
+
+define float @maxnum_intrinsic_nnan_attr_f32(float %a, float %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm2
+; SSE-NEXT:    orps %xmm3, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call float @llvm.maxnum.f32(float %a, float %b)
+  ret float %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <2 x double> @maxnum_intrinsic_nnan_attr_v2f64(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: maxnum_intrinsic_nnan_attr_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm1, %xmm2
+; SSE-NEXT:    maxpd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm2, %xmm0
+; SSE-NEXT:    orpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: maxnum_intrinsic_nnan_attr_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %r
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
+

Modified: llvm/trunk/test/CodeGen/X86/fminnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fminnum.ll?rev=331988&r1=331987&r2=331988&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fminnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fminnum.ll Thu May 10 07:48:42 2018
@@ -276,3 +276,102 @@ define <8 x double> @test_intrinsic_fmin
   %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
   ret <8 x double> %z
 }
+
+; FIXME: The IR-level FMF should propagate to the node.
+
+define float @minnum_intrinsic_nnan_fmf_f32(float %a, float %b) {
+; SSE-LABEL: minnum_intrinsic_nnan_fmf_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    minss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm2
+; SSE-NEXT:    orps %xmm3, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_fmf_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call nnan float @llvm.minnum.f32(float %a, float %b)
+  ret float %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x double> %b) {
+; SSE-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm1, %xmm2
+; SSE-NEXT:    minpd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm2, %xmm0
+; SSE-NEXT:    orpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
+  ret <2 x double> %r
+}
+
+; FIXME: The function-level "no-nans-fp-math" attribute (current, but someday legacy) should also enable the fold.
+
+define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm3
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    minsd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm1, %xmm2
+; SSE-NEXT:    orpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call nnan double @llvm.minnum.f64(double %a, double %b)
+  ret double %r
+}
+
+; FIXME: Make sure vectors work too.
+
+define <4 x float> @minnum_intrinsic_nnan_attr_v4f32(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: minnum_intrinsic_nnan_attr_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    minps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm2, %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: minnum_intrinsic_nnan_attr_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vminps %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %r = tail call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
+  ret <4 x float> %r
+}
+
+attributes #0 = { "no-nans-fp-math"="true" }
+

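For reference, the FIXMEs above anticipate that once the nnan information
reaches the selection DAG, the scalar cases should lower to a single
min/max instruction with no NaN-handling compare/blend. A hedged sketch of
the AVX output one would hope for on the f64 maxnum test (an expectation,
not output produced at this revision):

  # %bb.0:
          vmaxsd  %xmm1, %xmm0, %xmm0
          retq
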

More information about the llvm-commits mailing list