[llvm] r256510 - [x86] lower calls to fmax and llvm.maxnum.* using maxps/maxpd (PR24475)

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 28 11:20:19 PST 2015


Author: spatel
Date: Mon Dec 28 13:20:19 2015
New Revision: 256510

URL: http://llvm.org/viewvc/llvm-project?rev=256510&view=rev
Log:
[x86] lower calls to fmax and llvm.maxnum.* using maxps/maxpd (PR24475)

This is a follow-on to:
http://reviews.llvm.org/rL255700
http://reviews.llvm.org/rL256454
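
For context on why a bare maxps/maxpd is not enough: IEEE-754 maxNum (and
therefore llvm.maxnum.*) must return the non-NaN operand when exactly one
input is NaN, while the x86 max instructions return their second source
operand whenever either input is NaN. A minimal scalar sketch of the two
behaviors, in plain C++ (illustrative only, not code from this patch):

    #include <cmath>

    // What llvm.maxnum.f64 must compute: a NaN operand is ignored.
    double MaxNum(double X, double Y) {
      if (std::isnan(X)) return Y;
      if (std::isnan(Y)) return X;
      return X > Y ? X : Y;
    }

    // What a bare maxsd/maxpd computes: the greater-than compare is
    // false for NaNs (and for equal operands), so any NaN input makes
    // the instruction return its second source operand.
    double RawX86Max(double Src1, double Src2) {
      return Src1 > Src2 ? Src1 : Src2;
    }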


Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/fmaxnum.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=256510&r1=256509&r2=256510&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Dec 28 13:20:19 2015
@@ -26926,10 +26926,12 @@ static SDValue performFMaxNumCombine(SDN
   //       should be able to lower to FMAX/FMIN alone.
   // TODO: If an operand is already known to be a NaN or not a NaN, this
   //       should be an optional swap and FMAX/FMIN.
-  // TODO: Allow f64, vectors, and fminnum.
+  // TODO: Allow fminnum.
 
   EVT VT = N->getValueType(0);
-  if (!(Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)))
+  if (!((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
+        (Subtarget->hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
+        (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
     return SDValue();
 
   // This takes at least 3 instructions, so favor a library call when operating
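
For readers following the new guard above: the combine now also fires for
f64/v2f64 under SSE2 and for v8f32/v4f64 under AVX, in addition to the
original SSE1 f32/v4f32 cases. The emitted max + cmpunord + select
sequence (the "at least 3 instructions" the comment refers to) can be
modeled in scalar form roughly as follows (a hedged sketch; the operand
order matches the maxsd lines in the updated tests below):

    // RawMax = maxsd(Y, X): the compare is false for NaNs, so any NaN
    //          input yields the second source operand, X.
    // Mask   = cmpunordsd(X, X): true iff X is NaN.
    // Result = a blendv (or and/andn/or) select on that mask.
    double LoweredMaxNum(double X, double Y) {
      double RawMax = (Y > X) ? Y : X;
      bool XIsNaN = (X != X);
      return XIsNaN ? Y : RawMax;
    }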

Modified: llvm/trunk/test/CodeGen/X86/fmaxnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaxnum.ll?rev=256510&r1=256509&r2=256510&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaxnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaxnum.ll Mon Dec 28 13:20:19 2015
@@ -14,6 +14,7 @@ declare <2 x double> @llvm.maxnum.v2f64(
 declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
 declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>)
 
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
 ; CHECK-LABEL: @test_fmaxf
 ; SSE:         movaps %xmm0, %xmm2
@@ -42,10 +43,23 @@ define float @test_fmaxf_minsize(float %
   ret float %z
 }
 
-; FIXME: Doubles should be inlined similarly to floats.
+; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.
 
 ; CHECK-LABEL: @test_fmax
-; CHECK: jmp fmax
+; SSE:         movapd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordsd %xmm2, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm3
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    maxsd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm1, %xmm2
+; SSE-NEXT:    orpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
 define double @test_fmax(double %x, double %y) {
   %z = call double @fmax(double %x, double %y) readnone
   ret double %z
@@ -78,10 +92,22 @@ define float @test_intrinsic_fmaxf(float
   ret float %z
 }
 
-; FIXME: Doubles should be inlined similarly to floats.
 
 ; CHECK-LABEL: @test_intrinsic_fmax
-; CHECK: jmp fmax
+; SSE:         movapd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordsd %xmm2, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm3
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    maxsd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm1, %xmm2
+; SSE-NEXT:    orpd %xmm3, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
 define double @test_intrinsic_fmax(double %x, double %y) {
   %z = call double @llvm.maxnum.f64(double %x, double %y) readnone
   ret double %z
@@ -130,39 +156,82 @@ define <4 x float> @test_intrinsic_fmax_
   ret <4 x float> %z
 }
 
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f64
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE:         movapd %xmm1, %xmm2
+; SSE-NEXT:    maxpd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm2, %xmm0
+; SSE-NEXT:    orpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxpd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
 define <2 x double> @test_intrinsic_fmax_v2f64(<2 x double> %x, <2 x double> %y) {
   %z = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
   ret <2 x double> %z
 }
 
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE:         movapd  %xmm2, %xmm4
+; SSE-NEXT:    maxpd %xmm0, %xmm4
+; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm2
+; SSE-NEXT:    andnpd  %xmm4, %xmm0
+; SSE-NEXT:    orpd  %xmm2, %xmm0
+; SSE-NEXT:    movapd  %xmm3, %xmm2
+; SSE-NEXT:    maxpd %xmm1, %xmm2
+; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    andnpd  %xmm2, %xmm1
+; SSE-NEXT:    orpd  %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxpd  %ymm0, %ymm1, %ymm2
+; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
+; AVX-NEXT:    retq
 define <4 x double> @test_intrinsic_fmax_v4f64(<4 x double> %x, <4 x double> %y) {
   %z = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
   ret <4 x double> %z
 }
 
-; FIXME: Vector of doubles should be inlined similarly to vector of floats.
-
 ; CHECK-LABEL: @test_intrinsic_fmax_v8f64
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
-; CHECK: callq fmax
+; SSE:         movapd  %xmm4, %xmm8
+; SSE-NEXT:    maxpd %xmm0, %xmm8
+; SSE-NEXT:    cmpunordpd  %xmm0, %xmm0
+; SSE-NEXT:    andpd %xmm0, %xmm4
+; SSE-NEXT:    andnpd  %xmm8, %xmm0
+; SSE-NEXT:    orpd  %xmm4, %xmm0
+; SSE-NEXT:    movapd  %xmm5, %xmm4
+; SSE-NEXT:    maxpd %xmm1, %xmm4
+; SSE-NEXT:    cmpunordpd  %xmm1, %xmm1
+; SSE-NEXT:    andpd %xmm1, %xmm5
+; SSE-NEXT:    andnpd  %xmm4, %xmm1
+; SSE-NEXT:    orpd  %xmm5, %xmm1
+; SSE-NEXT:    movapd  %xmm6, %xmm4
+; SSE-NEXT:    maxpd %xmm2, %xmm4
+; SSE-NEXT:    cmpunordpd  %xmm2, %xmm2
+; SSE-NEXT:    andpd %xmm2, %xmm6
+; SSE-NEXT:    andnpd  %xmm4, %xmm2
+; SSE-NEXT:    orpd  %xmm6, %xmm2
+; SSE-NEXT:    movapd  %xmm7, %xmm4
+; SSE-NEXT:    maxpd %xmm3, %xmm4
+; SSE-NEXT:    cmpunordpd  %xmm3, %xmm3
+; SSE-NEXT:    andpd %xmm3, %xmm7
+; SSE-NEXT:    andnpd  %xmm4, %xmm3
+; SSE-NEXT:    orpd  %xmm7, %xmm3
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxpd  %ymm0, %ymm2, %ymm4
+; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
+; AVX-NEXT:    vmaxpd  %ymm1, %ymm3, %ymm2
+; AVX-NEXT:    vcmpunordpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
+; AVX-NEXT:    retq
 define <8 x double> @test_intrinsic_fmax_v8f64(<8 x double> %x, <8 x double> %y) {
   %z = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
   ret <8 x double> %z
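
One design note on the checked sequences above: the AVX run can use a
variable blend (vblendvpd) directly, but blendvpd only arrived with
SSE4.1, so the plain-SSE run builds the same select out of the classic
andpd/andnpd/orpd triple applied to the cmpunordpd mask. A rough
single-lane model of that bitwise select, in plain C++ (names are
illustrative only):

    #include <cstdint>
    #include <cstring>

    // One 64-bit lane of: Result = Mask ? Y : RawMax. The mask lane from
    // cmpunordpd is all-ones when X is NaN and all-zeros otherwise.
    double MaskSelectLane(double X, double Y, double RawMax) {
      std::uint64_t Mask = (X != X) ? ~0ULL : 0ULL;  // cmpunordpd lane
      std::uint64_t YBits, MaxBits;
      std::memcpy(&YBits, &Y, sizeof(YBits));
      std::memcpy(&MaxBits, &RawMax, sizeof(MaxBits));
      std::uint64_t ResultBits = (Mask & YBits)      // andpd
                               | (~Mask & MaxBits);  // andnpd + orpd
      double Result;
      std::memcpy(&Result, &ResultBits, sizeof(Result));
      return Result;
    }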



