[llvm] 056d31d - [ExpandReductions] fix FMF requirement for fmin/fmax

Sanjay Patel via llvm-commits llvm-commits@lists.llvm.org
Thu Feb 4 10:36:31 PST 2021


Author: Sanjay Patel
Date: 2021-02-04T13:32:08-05:00
New Revision: 056d31dd2a043ecc2baf22b708c8dd95f7c2df35

URL: https://github.com/llvm/llvm-project/commit/056d31dd2a043ecc2baf22b708c8dd95f7c2df35
DIFF: https://github.com/llvm/llvm-project/commit/056d31dd2a043ecc2baf22b708c8dd95f7c2df35.diff

LOG: [ExpandReductions] fix FMF requirement for fmin/fmax

The upstream callers (the vectorizers) were fixed with:
bbed5f2f8a04 (D95690)
77adbe6a8c71

We should remove this pass entirely now that reduction
legalization/lowering is expected to work just as well,
but we need to confirm that the shuffle ops do not
regress (for x86 in particular).

This should be the last step needed to close:
https://llvm.org/PR23116
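
For illustration (not part of this patch): after this change, a fmin/fmax
reduction only needs the "nnan" flag (plus a power-of-2 element count) to
take the shuffle-expansion path; the full "fast" flag set is no longer
required. A minimal IR example matching the calls in the tests below
(the function name is illustrative):

  define float @reduce_fmax(<4 x float> %v) {
    %r = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
    ret float %r
  }
  declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)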

Added: 
    

Modified: 
    llvm/lib/CodeGen/ExpandReductions.cpp
    llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
    llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
    llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/ExpandReductions.cpp b/llvm/lib/CodeGen/ExpandReductions.cpp
index a4c9f02dc64d..2bcaf750911b 100644
--- a/llvm/lib/CodeGen/ExpandReductions.cpp
+++ b/llvm/lib/CodeGen/ExpandReductions.cpp
@@ -154,13 +154,12 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
     }
     case Intrinsic::vector_reduce_fmax:
     case Intrinsic::vector_reduce_fmin: {
-      // FIXME: We only expand 'fast' reductions here because the underlying
-      //        code in createMinMaxOp() assumes that comparisons use 'fast'
-      //        semantics.
+      // We require "nnan" to use a shuffle reduction; "nsz" is implied by the
+      // semantics of the reduction.
       Value *Vec = II->getArgOperand(0);
       if (!isPowerOf2_32(
               cast<FixedVectorType>(Vec->getType())->getNumElements()) ||
-          !FMF.isFast())
+          !FMF.noNaNs())
         continue;
 
       Rdx = getShuffleReduction(Builder, Vec, getOpcode(ID), RK);
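
For context, a sketch (not part of this patch; value names are illustrative)
of what getShuffleReduction() emits for a <4 x float> "nnan" fmax: log2(N)
rounds of halving shuffle, compare, and select, followed by an extract of
lane 0. The PhaseOrdering test below checks the same pattern for v8f32.

  %s1 = shufflevector <4 x float> %v, <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %c1 = fcmp nnan ogt <4 x float> %v, %s1
  %m1 = select nnan <4 x i1> %c1, <4 x float> %v, <4 x float> %s1
  %s2 = shufflevector <4 x float> %m1, <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %c2 = fcmp nnan ogt <4 x float> %m1, %s2
  %m2 = select nnan <4 x i1> %c2, <4 x float> %m1, <4 x float> %s2
  %r = extractelement <4 x float> %m2, i32 0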

diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
index 167248181ecb..d576fc074775 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
@@ -51,45 +51,35 @@ define float @test_v4f32(<4 x float> %a0) {
 ; SSE2-LABEL: test_v4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    movaps %xmm0, %xmm3
-; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
-; SSE2-NEXT:    maxss %xmm3, %xmm0
-; SSE2-NEXT:    maxss %xmm2, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    maxps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
 ; SSE2-NEXT:    maxss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    maxss %xmm3, %xmm0
-; SSE41-NEXT:    maxss %xmm2, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    maxps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    maxss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmaxss %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmaxss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a0)
@@ -100,67 +90,43 @@ define float @test_v8f32(<8 x float> %a0) {
 ; SSE2-LABEL: test_v8f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    maxps %xmm1, %xmm0
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    maxss %xmm2, %xmm1
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    maxss %xmm2, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT:    maxss %xmm0, %xmm1
-; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    maxps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE2-NEXT:    maxss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v8f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    maxps %xmm1, %xmm0
-; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    maxss %xmm2, %xmm1
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    maxss %xmm2, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT:    maxss %xmm0, %xmm1
-; SSE41-NEXT:    movaps %xmm1, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    maxps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    maxss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmaxss %xmm7, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm6, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm5, %xmm0, %xmm0
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmaxss %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm5, %xmm0, %xmm0
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm4, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a0)
@@ -173,16 +139,12 @@ define float @test_v16f32(<16 x float> %a0) {
 ; SSE2-NEXT:    maxps %xmm3, %xmm1
 ; SSE2-NEXT:    maxps %xmm2, %xmm0
 ; SSE2-NEXT:    maxps %xmm1, %xmm0
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    maxss %xmm2, %xmm1
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    maxss %xmm2, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT:    maxss %xmm0, %xmm1
-; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    maxps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE2-NEXT:    maxss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v16f32:
@@ -190,69 +152,35 @@ define float @test_v16f32(<16 x float> %a0) {
 ; SSE41-NEXT:    maxps %xmm3, %xmm1
 ; SSE41-NEXT:    maxps %xmm2, %xmm0
 ; SSE41-NEXT:    maxps %xmm1, %xmm0
-; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    maxss %xmm2, %xmm1
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    maxss %xmm2, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT:    maxss %xmm0, %xmm1
-; SSE41-NEXT:    movaps %xmm1, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    maxps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    maxss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vmaxss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX-NEXT:    vmaxss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmaxss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vmaxss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX-NEXT:    vmaxss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm8 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm9 = xmm1[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm10 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm5
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm11 = xmm5[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm12 = xmm5[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm13 = xmm5[1,1,3,3]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm14 = xmm3[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm15 = xmm3[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vmaxss %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm4, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm15, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm14, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm5, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm13, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm12, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm11, %xmm0, %xmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vmaxps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm10, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm9, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxss %xmm8, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmax.v16f32(<16 x float> %a0)
@@ -340,22 +268,18 @@ define double @test_v4f64(<4 x double> %a0) {
 ; AVX-LABEL: test_v4f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT:    vmaxsd %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmaxsd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT:    vmaxsd %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmax.v4f64(<4 x double> %a0)
@@ -376,31 +300,21 @@ define double @test_v8f64(<8 x double> %a0) {
 ; AVX-LABEL: test_v8f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
-; AVX512-NEXT:    vmaxsd %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm5, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm4, %xmm0, %xmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmax.v8f64(<8 x double> %a0)
@@ -410,12 +324,12 @@ define double @test_v8f64(<8 x double> %a0) {
 define double @test_v16f64(<16 x double> %a0) {
 ; SSE-LABEL: test_v16f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    maxpd %xmm7, %xmm3
-; SSE-NEXT:    maxpd %xmm5, %xmm1
-; SSE-NEXT:    maxpd %xmm3, %xmm1
 ; SSE-NEXT:    maxpd %xmm6, %xmm2
 ; SSE-NEXT:    maxpd %xmm4, %xmm0
 ; SSE-NEXT:    maxpd %xmm2, %xmm0
+; SSE-NEXT:    maxpd %xmm7, %xmm3
+; SSE-NEXT:    maxpd %xmm5, %xmm1
+; SSE-NEXT:    maxpd %xmm3, %xmm1
 ; SSE-NEXT:    maxpd %xmm1, %xmm0
 ; SSE-NEXT:    movapd %xmm0, %xmm1
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
@@ -427,32 +341,22 @@ define double @test_v16f64(<16 x double> %a0) {
 ; AVX-NEXT:    vmaxpd %ymm3, %ymm1, %ymm1
 ; AVX-NEXT:    vmaxpd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT:    vmaxsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT:    vmaxsd %xmm0, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT:    vmaxsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmax.v16f64(<16 x double> %a0)
@@ -462,61 +366,69 @@ define double @test_v16f64(<16 x double> %a0) {
 define half @test_v2f16(<2 x half> %a0) nounwind {
 ; SSE-LABEL: test_v2f16:
 ; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %rbp
+; SSE-NEXT:    pushq %r14
 ; SSE-NEXT:    pushq %rbx
 ; SSE-NEXT:    subq $16, %rsp
-; SSE-NEXT:    movl %edi, %ebx
-; SSE-NEXT:    movzwl %si, %edi
+; SSE-NEXT:    movl %esi, %ebx
+; SSE-NEXT:    movl %edi, %r14d
+; SSE-NEXT:    movzwl %bx, %ebp
+; SSE-NEXT:    movl %ebp, %edi
 ; SSE-NEXT:    callq __gnu_h2f_ieee@PLT
-; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movzwl %bx, %edi
+; SSE-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE-NEXT:    movzwl %r14w, %edi
 ; SSE-NEXT:    callq __gnu_h2f_ieee@PLT
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    cmpunordss %xmm0, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm2
-; SSE-NEXT:    movaps (%rsp), %xmm3 # 16-byte Reload
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    maxss %xmm0, %xmm3
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
-; SSE-NEXT:    callq __gnu_f2h_ieee@PLT
+; SSE-NEXT:    ucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT:    movw %bp, {{[0-9]+}}(%rsp)
+; SSE-NEXT:    cmoval %r14d, %ebx
+; SSE-NEXT:    movw %bx, (%rsp)
+; SSE-NEXT:    movl (%rsp), %eax
+; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT:    addq $16, %rsp
 ; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    popq %rbp
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f16:
 ; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r14
 ; AVX-NEXT:    pushq %rbx
 ; AVX-NEXT:    subq $16, %rsp
 ; AVX-NEXT:    movl %esi, %ebx
-; AVX-NEXT:    movzwl %di, %edi
+; AVX-NEXT:    movl %edi, %r14d
+; AVX-NEXT:    movzwl %bx, %ebp
+; AVX-NEXT:    movl %ebp, %edi
 ; AVX-NEXT:    callq __gnu_h2f_ieee@PLT
 ; AVX-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX-NEXT:    movzwl %bx, %edi
+; AVX-NEXT:    movzwl %r14w, %edi
 ; AVX-NEXT:    callq __gnu_h2f_ieee@PLT
-; AVX-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
-; AVX-NEXT:    # xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vmaxss %xmm2, %xmm0, %xmm1
-; AVX-NEXT:    vcmpunordss %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    callq __gnu_f2h_ieee@PLT
+; AVX-NEXT:    vucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    movw %bp, {{[0-9]+}}(%rsp)
+; AVX-NEXT:    cmoval %r14d, %ebx
+; AVX-NEXT:    movw %bx, (%rsp)
+; AVX-NEXT:    movl (%rsp), %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    addq $16, %rsp
 ; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %rbp
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v2f16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movzwl %di, %eax
+; AVX512-NEXT:    movzwl %si, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm0
 ; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    movzwl %si, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    movzwl %di, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
-; AVX512-NEXT:    vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm0
-; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vucomiss %xmm0, %xmm1
+; AVX512-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    cmoval %edi, %esi
+; AVX512-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call nnan half @llvm.vector.reduce.fmax.v2f16(<2 x half> %a0)

diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
index 99912900ef7f..0e96da412ef1 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
@@ -92,45 +92,35 @@ define float @test_v4f32(<4 x float> %a0) {
 ; SSE2-LABEL: test_v4f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    movaps %xmm0, %xmm3
-; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
-; SSE2-NEXT:    minss %xmm3, %xmm0
-; SSE2-NEXT:    minss %xmm2, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    minps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
 ; SSE2-NEXT:    minss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    minss %xmm3, %xmm0
-; SSE41-NEXT:    minss %xmm2, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    minps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    minss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT:    vminss %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vminss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a0)
@@ -141,67 +131,43 @@ define float @test_v8f32(<8 x float> %a0) {
 ; SSE2-LABEL: test_v8f32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    minps %xmm1, %xmm0
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    minss %xmm2, %xmm1
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    minss %xmm2, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT:    minss %xmm0, %xmm1
-; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    minps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE2-NEXT:    minss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v8f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    minps %xmm1, %xmm0
-; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    minss %xmm2, %xmm1
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    minss %xmm2, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT:    minss %xmm0, %xmm1
-; SSE41-NEXT:    movaps %xmm1, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    minps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    minss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v8f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; AVX-NEXT:    vminss %xmm7, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm6, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm5, %xmm0, %xmm0
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vminss %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vminss %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm5, %xmm0, %xmm0
+; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm4, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a0)
@@ -214,16 +180,12 @@ define float @test_v16f32(<16 x float> %a0) {
 ; SSE2-NEXT:    minps %xmm3, %xmm1
 ; SSE2-NEXT:    minps %xmm2, %xmm0
 ; SSE2-NEXT:    minps %xmm1, %xmm0
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    minss %xmm2, %xmm1
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT:    minss %xmm2, %xmm1
-; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT:    minss %xmm0, %xmm1
-; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE2-NEXT:    minps %xmm1, %xmm0
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
+; SSE2-NEXT:    minss %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v16f32:
@@ -231,69 +193,35 @@ define float @test_v16f32(<16 x float> %a0) {
 ; SSE41-NEXT:    minps %xmm3, %xmm1
 ; SSE41-NEXT:    minps %xmm2, %xmm0
 ; SSE41-NEXT:    minps %xmm1, %xmm0
-; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    minss %xmm2, %xmm1
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT:    minss %xmm2, %xmm1
-; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT:    minss %xmm0, %xmm1
-; SSE41-NEXT:    movaps %xmm1, %xmm0
+; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE41-NEXT:    minps %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    minss %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v16f32:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vminss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX-NEXT:    vminss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vminss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vminss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm8 = xmm1[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm9 = xmm1[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm10 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm5
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm11 = xmm5[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm12 = xmm5[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm13 = xmm5[1,1,3,3]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm14 = xmm3[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm15 = xmm3[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vminss %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm4, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm15, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm14, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm5, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm13, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm12, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm11, %xmm0, %xmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vminps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vminss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm10, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm9, %xmm0, %xmm0
-; AVX512-NEXT:    vminss %xmm8, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan float @llvm.vector.reduce.fmin.v16f32(<16 x float> %a0)
@@ -339,22 +267,18 @@ define double @test_v4f64(<4 x double> %a0) {
 ; AVX-LABEL: test_v4f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT:    vminsd %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vminsd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT:    vminsd %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmin.v4f64(<4 x double> %a0)
@@ -375,31 +299,21 @@ define double @test_v8f64(<8 x double> %a0) {
 ; AVX-LABEL: test_v8f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
-; AVX512-NEXT:    vminsd %xmm7, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm5, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm6, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm4, %xmm0, %xmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmin.v8f64(<8 x double> %a0)
@@ -409,12 +323,12 @@ define double @test_v8f64(<8 x double> %a0) {
 define double @test_v16f64(<16 x double> %a0) {
 ; SSE-LABEL: test_v16f64:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    minpd %xmm7, %xmm3
-; SSE-NEXT:    minpd %xmm5, %xmm1
-; SSE-NEXT:    minpd %xmm3, %xmm1
 ; SSE-NEXT:    minpd %xmm6, %xmm2
 ; SSE-NEXT:    minpd %xmm4, %xmm0
 ; SSE-NEXT:    minpd %xmm2, %xmm0
+; SSE-NEXT:    minpd %xmm7, %xmm3
+; SSE-NEXT:    minpd %xmm5, %xmm1
+; SSE-NEXT:    minpd %xmm3, %xmm1
 ; SSE-NEXT:    minpd %xmm1, %xmm0
 ; SSE-NEXT:    movapd %xmm0, %xmm1
 ; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
@@ -426,32 +340,22 @@ define double @test_v16f64(<16 x double> %a0) {
 ; AVX-NEXT:    vminpd %ymm3, %ymm1, %ymm1
 ; AVX-NEXT:    vminpd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v16f64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT:    vminpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX512-NEXT:    vminsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT:    vminsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
-; AVX512-NEXT:    vminsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT:    vminsd %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT:    vminsd %xmm0, %xmm1, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT:    vminsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = call nnan double @llvm.vector.reduce.fmin.v16f64(<16 x double> %a0)
@@ -461,61 +365,69 @@ define double @test_v16f64(<16 x double> %a0) {
 define half @test_v2f16(<2 x half> %a0) nounwind {
 ; SSE-LABEL: test_v2f16:
 ; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %rbp
+; SSE-NEXT:    pushq %r14
 ; SSE-NEXT:    pushq %rbx
 ; SSE-NEXT:    subq $16, %rsp
-; SSE-NEXT:    movl %edi, %ebx
-; SSE-NEXT:    movzwl %si, %edi
+; SSE-NEXT:    movl %esi, %ebx
+; SSE-NEXT:    movl %edi, %r14d
+; SSE-NEXT:    movzwl %bx, %ebp
+; SSE-NEXT:    movl %ebp, %edi
 ; SSE-NEXT:    callq __gnu_h2f_ieee@PLT
-; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movzwl %bx, %edi
+; SSE-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE-NEXT:    movzwl %r14w, %edi
 ; SSE-NEXT:    callq __gnu_h2f_ieee@PLT
-; SSE-NEXT:    movaps %xmm0, %xmm1
-; SSE-NEXT:    cmpunordss %xmm0, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm2
-; SSE-NEXT:    movaps (%rsp), %xmm3 # 16-byte Reload
-; SSE-NEXT:    andps %xmm3, %xmm2
-; SSE-NEXT:    minss %xmm0, %xmm3
-; SSE-NEXT:    andnps %xmm3, %xmm1
-; SSE-NEXT:    orps %xmm2, %xmm1
-; SSE-NEXT:    movaps %xmm1, %xmm0
-; SSE-NEXT:    callq __gnu_f2h_ieee@PLT
+; SSE-NEXT:    ucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE-NEXT:    movw %bp, {{[0-9]+}}(%rsp)
+; SSE-NEXT:    cmovbl %r14d, %ebx
+; SSE-NEXT:    movw %bx, (%rsp)
+; SSE-NEXT:    movl (%rsp), %eax
+; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT:    addq $16, %rsp
 ; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    popq %rbp
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f16:
 ; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r14
 ; AVX-NEXT:    pushq %rbx
 ; AVX-NEXT:    subq $16, %rsp
 ; AVX-NEXT:    movl %esi, %ebx
-; AVX-NEXT:    movzwl %di, %edi
+; AVX-NEXT:    movl %edi, %r14d
+; AVX-NEXT:    movzwl %bx, %ebp
+; AVX-NEXT:    movl %ebp, %edi
 ; AVX-NEXT:    callq __gnu_h2f_ieee@PLT
 ; AVX-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX-NEXT:    movzwl %bx, %edi
+; AVX-NEXT:    movzwl %r14w, %edi
 ; AVX-NEXT:    callq __gnu_h2f_ieee@PLT
-; AVX-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
-; AVX-NEXT:    # xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vminss %xmm2, %xmm0, %xmm1
-; AVX-NEXT:    vcmpunordss %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    callq __gnu_f2h_ieee@PLT
+; AVX-NEXT:    vucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    movw %bp, {{[0-9]+}}(%rsp)
+; AVX-NEXT:    cmovbl %r14d, %ebx
+; AVX-NEXT:    movw %bx, (%rsp)
+; AVX-NEXT:    movl (%rsp), %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    addq $16, %rsp
 ; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %rbp
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v2f16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movzwl %di, %eax
+; AVX512-NEXT:    movzwl %si, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm0
 ; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    movzwl %si, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    movzwl %di, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512-NEXT:    vminss %xmm0, %xmm1, %xmm2
-; AVX512-NEXT:    vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm2 {%k1}
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm0
-; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vucomiss %xmm0, %xmm1
+; AVX512-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    cmovbl %edi, %esi
+; AVX512-NEXT:    movw %si, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX512-NEXT:    retq
   %1 = call nnan half @llvm.vector.reduce.fmin.v2f16(<2 x half> %a0)

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
index cc517aa443e0..3aefdb2dd6b1 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
@@ -411,7 +411,16 @@ define float @findMax(<8 x float>* byval(<8 x float>) align 16 %0) {
 ; CHECK-LABEL: @findMax(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[V:%.*]] = load <8 x float>, <8 x float>* [[TMP0:%.*]], align 16, [[TBAA0]]
-; CHECK-NEXT:    [[TMP1:%.*]] = call nnan ninf nsz float @llvm.vector.reduce.fmax.v8f32(<8 x float> [[V]])
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x float> [[V]], <8 x float> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = fcmp nnan ninf nsz ogt <8 x float> [[V]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select nnan ninf nsz <8 x i1> [[RDX_MINMAX_CMP]], <8 x float> [[V]], <8 x float> [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF8:%.*]] = shufflevector <8 x float> [[RDX_MINMAX_SELECT]], <8 x float> poison, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP9:%.*]] = fcmp nnan ninf nsz ogt <8 x float> [[RDX_MINMAX_SELECT]], [[RDX_SHUF8]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT10:%.*]] = select nnan ninf nsz <8 x i1> [[RDX_MINMAX_CMP9]], <8 x float> [[RDX_MINMAX_SELECT]], <8 x float> [[RDX_SHUF8]]
+; CHECK-NEXT:    [[RDX_SHUF11:%.*]] = shufflevector <8 x float> [[RDX_MINMAX_SELECT10]], <8 x float> poison, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP12:%.*]] = fcmp nnan ninf nsz ogt <8 x float> [[RDX_MINMAX_SELECT10]], [[RDX_SHUF11]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT13:%.*]] = select nnan ninf nsz <8 x i1> [[RDX_MINMAX_CMP12]], <8 x float> [[RDX_MINMAX_SELECT10]], <8 x float> [[RDX_SHUF11]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <8 x float> [[RDX_MINMAX_SELECT13]], i32 0
 ; CHECK-NEXT:    ret float [[TMP1]]
 ;
 entry:



