[llvm] r255700 - [x86] inline calls to fmaxf / llvm.maxnum.f32 using maxss (PR24475)

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 15 15:11:46 PST 2015


Author: spatel
Date: Tue Dec 15 17:11:43 2015
New Revision: 255700

URL: http://llvm.org/viewvc/llvm-project?rev=255700&view=rev
Log:
[x86] inline calls to fmaxf / llvm.maxnum.f32 using maxss (PR24475)

This patch improves on the suggested codegen from PR24475:
https://llvm.org/bugs/show_bug.cgi?id=24475

but only for the fmaxf() case to start, so we can sort out any bugs before
extending to fmin, f64, and vectors.

The fmax / maxnum definitions give us flexibility in how signed zeros are
handled, so the only thing we have to worry about in this replacement sequence
is NaN handling.
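
For illustration only (not part of the patch; the helper names are ad hoc), a
scalar C++ model of the replacement sequence. x86_maxss() models the SSE maxss
instruction, which returns its second source operand whenever either input is
a NaN:

  #include <cmath>

  // Model of maxss: strict greater-than compare; any NaN falls through to 'b'.
  static float x86_maxss(float a, float b) {
    return (a > b) ? a : b;
  }

  // fmaxf(x, y) becomes: max = maxss(y, x); return isnan(x) ? y : max;
  static float inlined_fmaxf(float x, float y) {
    float max = x86_maxss(y, x);     // if either input is a NaN, this yields x
    return std::isnan(x) ? y : max;  // NaN x: take y; both NaN: y (a NaN)
  }

This covers all four NaN combinations: num/num returns the max, a NaN in y is
masked by the maxss pass-through of x, and a NaN in x is selected away.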

Note 1: It may be better to implement this as lowerFMAXNUM(), but that exposes
a problem: SelectionDAGBuilder::visitSelect() transforms compare/select
instructions into FMAXNUM nodes if we declare FMAXNUM legal or custom. Perhaps
that transform should check for NaN inputs or global unsafe-math first? As it
stands, it bypasses a large set of optimizations that the x86 backend already
has in PerformSELECTCombine().

Note 2: The v2f32 test reveals another bug: the vector is widened to v4f32, so
completely unnecessary operations are performed on the undef elements of the
vector.

Differential Revision: http://reviews.llvm.org/D15294


Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/fmaxnum.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=255700&r1=255699&r2=255700&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Dec 15 17:11:43 2015
@@ -1799,6 +1799,7 @@ X86TargetLowering::X86TargetLowering(con
   setTargetDAGCombine(ISD::FSUB);
   setTargetDAGCombine(ISD::FNEG);
   setTargetDAGCombine(ISD::FMA);
+  setTargetDAGCombine(ISD::FMAXNUM);
   setTargetDAGCombine(ISD::SUB);
   setTargetDAGCombine(ISD::LOAD);
   setTargetDAGCombine(ISD::MLOAD);
@@ -26533,6 +26534,56 @@ static SDValue PerformFMinFMaxCombine(SD
                      N->getOperand(0), N->getOperand(1));
 }
 
+static SDValue performFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
+                                     const X86Subtarget *Subtarget) {
+  // This takes at least 3 instructions, so favor a library call when
+  // minimizing code size.
+  if (DAG.getMachineFunction().getFunction()->optForMinSize())
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+
+  // TODO: Check for global or instruction-level "nnan". In that case, we
+  //       should be able to lower to FMAX/FMIN alone.
+  // TODO: If an operand is already known to be a NaN or not a NaN, this
+  //       should be an optional swap and FMAX/FMIN.
+  // TODO: Allow f64, vectors, and fminnum.
+
+  if (VT != MVT::f32 || !Subtarget->hasSSE1() || Subtarget->useSoftFloat())
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDLoc DL(N);
+  EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
+      DAG.getDataLayout(), *DAG.getContext(), VT);
+
+  // There are 4 possibilities involving NaN inputs, and these are the required
+  // outputs:
+  //                   Op1
+  //               Num     NaN
+  //            ----------------
+  //       Num  |  Max  |  Op0 |
+  // Op0        ----------------
+  //       NaN  |  Op1  |  NaN |
+  //            ----------------
+  //
+  // The SSE FP max/min instructions were not designed for this case, but rather
+  // to implement:
+  //   Max = Op1 > Op0 ? Op1 : Op0
+  //
+  // So they always return Op0 if either input is a NaN. However, we can still
+  // use those instructions for fmaxnum by selecting away a NaN input.
+
+  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
+  SDValue Max = DAG.getNode(X86ISD::FMAX, DL, VT, Op1, Op0);
+  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
+
+  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
+  // are NaN, the NaN value of Op1 is the result.
+  return DAG.getNode(ISD::SELECT, DL, VT, IsOp0Nan, Op1, Max);
+}
+
 /// Do target-specific dag combines on X86ISD::FAND nodes.
 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
@@ -27392,6 +27443,7 @@ SDValue X86TargetLowering::PerformDAGCom
   case X86ISD::FOR:         return PerformFORCombine(N, DAG, Subtarget);
   case X86ISD::FMIN:
   case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
+  case ISD::FMAXNUM:        return performFMaxNumCombine(N, DAG, Subtarget);
   case X86ISD::FAND:        return PerformFANDCombine(N, DAG, Subtarget);
   case X86ISD::FANDN:       return PerformFANDNCombine(N, DAG, Subtarget);
   case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);

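As a reading aid for the SSE CHECK lines in the test update below: SSE1 has no
variable blend instruction, so the final select is lowered to a cmpunordss mask
followed by the classic andps/andnps/orps bit-select (AVX targets can use
vblendvps directly). A rough scalar C++ model, illustrative only (the helper
name is ad hoc):

  #include <cstdint>
  #include <cstring>

  // 'mask' is all-ones when Op0 is a NaN (cmpunordss Op0, Op0), all-zeros
  // otherwise; the result is Op1 when the mask is set and the max otherwise.
  static float sse_bit_select(uint32_t mask, float op1, float max) {
    uint32_t a, b;
    std::memcpy(&a, &op1, sizeof a);
    std::memcpy(&b, &max, sizeof b);
    uint32_t r = (mask & a) | (~mask & b);   // andps + andnps + orps
    float out;
    std::memcpy(&out, &r, sizeof out);
    return out;                              // == mask ? op1 : max
  }
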
Modified: llvm/trunk/test/CodeGen/X86/fmaxnum.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fmaxnum.ll?rev=255700&r1=255699&r2=255700&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fmaxnum.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fmaxnum.ll Tue Dec 15 17:11:43 2015
@@ -16,12 +16,34 @@ declare <8 x double> @llvm.maxnum.v8f64(
 
 
 ; CHECK-LABEL: @test_fmaxf
-; CHECK: jmp fmaxf
+; SSE:         movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm2, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm2
+; SSE-NEXT:    orps %xmm3, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
 define float @test_fmaxf(float %x, float %y) {
   %z = call float @fmaxf(float %x, float %y) readnone
   ret float %z
 }
 
+; CHECK-LABEL: @test_fmaxf_minsize
+; CHECK:       jmp fmaxf
+define float @test_fmaxf_minsize(float %x, float %y) minsize {
+  %z = call float @fmaxf(float %x, float %y) readnone
+  ret float %z
+}
+
+; FIXME: Doubles should be inlined similarly to floats.
+
 ; CHECK-LABEL: @test_fmax
 ; CHECK: jmp fmax
 define double @test_fmax(double %x, double %y) {
@@ -37,12 +59,27 @@ define x86_fp80 @test_fmaxl(x86_fp80 %x,
 }
 
 ; CHECK-LABEL: @test_intrinsic_fmaxf
-; CHECK: jmp fmaxf
+; SSE:         movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm2, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm2
+; SSE-NEXT:    orps %xmm3, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
 define float @test_intrinsic_fmaxf(float %x, float %y) {
   %z = call float @llvm.maxnum.f32(float %x, float %y) readnone
   ret float %z
 }
 
+; FIXME: Doubles should be inlined similarly to floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax
 ; CHECK: jmp fmax
 define double @test_intrinsic_fmax(double %x, double %y) {
@@ -57,122 +94,159 @@ define x86_fp80 @test_intrinsic_fmaxl(x8
   ret x86_fp80 %z
 }
 
+; FIXME: This should not be doing 4 scalar ops on a 2 element vector.
+; FIXME: This should use vector ops (maxps / cmpps).
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f32
-; SSE:         movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE:         movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT:    movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE:         movaps %xmm1, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT:    movaps %xmm3, %xmm4
+; SSE-NEXT:    cmpunordss %xmm4, %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    andps %xmm2, %xmm5
+; SSE-NEXT:    maxss %xmm3, %xmm2
+; SSE-NEXT:    andnps %xmm2, %xmm4
+; SSE-NEXT:    orps %xmm5, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT:    movaps %xmm0, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; SSE-NEXT:    movaps %xmm5, %xmm3
+; SSE-NEXT:    cmpunordss %xmm3, %xmm3
+; SSE-NEXT:    movaps %xmm3, %xmm6
+; SSE-NEXT:    andps %xmm2, %xmm6
+; SSE-NEXT:    maxss %xmm5, %xmm2
+; SSE-NEXT:    andnps %xmm2, %xmm3
+; SSE-NEXT:    orps %xmm6, %xmm3
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm2, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    andps %xmm1, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    maxss %xmm0, %xmm5
+; SSE-NEXT:    andnps %xmm5, %xmm2
+; SSE-NEXT:    orps %xmm4, %xmm2
 ; SSE-NEXT:    shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE:         movaps %xmm1, %xmm0
-; SSE-NEXT:    addq $72, %rsp
+; SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT:    movapd %xmm0, %xmm4
+; SSE-NEXT:    cmpunordss %xmm4, %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    andps %xmm1, %xmm5
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm4
+; SSE-NEXT:    orps %xmm5, %xmm4
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT:    callq fmaxf
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT:    addq $56, %rsp
+; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX-NEXT:    vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT:    vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT:    vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT:    vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm3
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
 define <2 x float> @test_intrinsic_fmax_v2f32(<2 x float> %x, <2 x float> %y) {
   %z = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
   ret <2 x float> %z
 }
 
+; FIXME: This should use vector ops (maxps / cmpps).
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f32
-; SSE:         movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE:         movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movapd {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
-; SSE-NEXT:    movapd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE:         movaps %xmm1, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT:    movaps %xmm3, %xmm4
+; SSE-NEXT:    cmpunordss %xmm4, %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    andps %xmm2, %xmm5
+; SSE-NEXT:    maxss %xmm3, %xmm2
+; SSE-NEXT:    andnps %xmm2, %xmm4
+; SSE-NEXT:    orps %xmm5, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; SSE-NEXT:    movaps %xmm0, %xmm5
+; SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; SSE-NEXT:    movaps %xmm5, %xmm3
+; SSE-NEXT:    cmpunordss %xmm3, %xmm3
+; SSE-NEXT:    movaps %xmm3, %xmm6
+; SSE-NEXT:    andps %xmm2, %xmm6
+; SSE-NEXT:    maxss %xmm5, %xmm2
+; SSE-NEXT:    andnps %xmm2, %xmm3
+; SSE-NEXT:    orps %xmm6, %xmm3
+; SSE-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm2, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    andps %xmm1, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    maxss %xmm0, %xmm5
+; SSE-NEXT:    andnps %xmm5, %xmm2
+; SSE-NEXT:    orps %xmm4, %xmm2
 ; SSE-NEXT:    shufpd {{.*#+}} xmm1 = xmm1[1,0]
-; SSE-NEXT:    callq fmaxf
-; SSE-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE:         movaps %xmm1, %xmm0
-; SSE-NEXT:    addq $72, %rsp
+; SSE-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; SSE-NEXT:    movapd %xmm0, %xmm4
+; SSE-NEXT:    cmpunordss %xmm4, %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    andps %xmm1, %xmm5
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm4
+; SSE-NEXT:    orps %xmm5, %xmm4
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX:         vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX-NEXT:    callq fmaxf
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vmovshdup {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vmovshdup {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vpermilpd $1, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT:    vpermilps $231, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; AVX:         vpermilps $231, {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; AVX:         callq fmaxf
-; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-NEXT:    addq $56, %rsp
+; AVX:         vmaxss %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX-NEXT:    vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT:    vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT:    vmaxss %xmm3, %xmm4, %xmm5
+; AVX-NEXT:    vcmpunordss %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vblendvps %xmm3, %xmm4, %xmm5, %xmm3
+; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm3
+; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX-NEXT:    retq
 define <4 x float> @test_intrinsic_fmax_v4f32(<4 x float> %x, <4 x float> %y) {
   %z = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
   ret <4 x float> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v2f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
@@ -181,6 +255,8 @@ define <2 x double> @test_intrinsic_fmax
   ret <2 x double> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v4f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
@@ -191,6 +267,8 @@ define <4 x double> @test_intrinsic_fmax
   ret <4 x double> %z
 }
 
+; FIXME: Vector of doubles should be inlined similarly to vector of floats.
+
 ; CHECK-LABEL: @test_intrinsic_fmax_v8f64
 ; CHECK: callq fmax
 ; CHECK: callq fmax
