[llvm] [X86] Support lowering of FMINIMUMNUM/FMAXIMUMNUM (PR #121464)
Phoebe Wang via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 2 05:13:29 PST 2025
https://github.com/phoebewang updated https://github.com/llvm/llvm-project/pull/121464
From d72d42b434eecacee3d8907056d13d9b1dfda77a Mon Sep 17 00:00:00 2001
From: "Wang, Phoebe" <phoebe.wang at intel.com>
Date: Thu, 2 Jan 2025 17:59:08 +0800
Subject: [PATCH 1/2] [X86] Support lowering of FMINIMUMNUM/FMAXIMUMNUM
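This adds ISD::FMINIMUMNUM/FMAXIMUMNUM handling to the generic vector
scalarizer and vector-op legalizer, gives the two opcodes the same X86
operation actions already used for FMINIMUM/FMAXIMUM, and routes them
through LowerFMINIMUM_FMAXIMUM: the "num" variants use an ordered (SETO)
compare for the final NaN fixup instead of SETUO, and on AVX10.2 they map
to the VMINMAX instructions with bit 4 of the immediate set ($16/$17
instead of $0/$1).

For reference, the scalar pattern exercised by the new test (the function
name below is illustrative only) is:

  define float @max_f32(float %x, float %y) {
    %r = call float @llvm.maximumnum.f32(float %x, float %y)
    ret float %r
  }

On AVX10.2 this now selects to a single "vminmaxss $17, %xmm1, %xmm0";
other targets go through the compare/select expansion shared with
FMAXIMUM, using the ordered predicate.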
---
.../SelectionDAG/LegalizeVectorOps.cpp | 2 +
.../SelectionDAG/LegalizeVectorTypes.cpp | 2 +
llvm/lib/Target/X86/X86ISelLowering.cpp | 40 +-
.../CodeGen/X86/fminimumnum-fmaximumnum.ll | 2553 +++++++++++++++++
4 files changed, 2592 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index db21e708970648..de1bec1c130d20 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -402,6 +402,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::FMAXNUM_IEEE:
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
+ case ISD::FMINIMUMNUM:
+ case ISD::FMAXIMUMNUM:
case ISD::FCOPYSIGN:
case ISD::FSQRT:
case ISD::FSIN:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 107454a92e356c..780eba16c9c498 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -149,6 +149,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FMAXNUM_IEEE:
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
+ case ISD::FMINIMUMNUM:
+ case ISD::FMAXIMUMNUM:
case ISD::FLDEXP:
case ISD::ABDS:
case ISD::ABDU:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a0514e93d6598b..219674b1e84131 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -623,6 +623,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMAXNUM, VT, Action);
setOperationAction(ISD::FMINIMUM, VT, Action);
setOperationAction(ISD::FMAXIMUM, VT, Action);
+ setOperationAction(ISD::FMINIMUMNUM, VT, Action);
+ setOperationAction(ISD::FMAXIMUMNUM, VT, Action);
setOperationAction(ISD::FSIN, VT, Action);
setOperationAction(ISD::FCOS, VT, Action);
setOperationAction(ISD::FSINCOS, VT, Action);
@@ -1066,6 +1068,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMAXIMUM, MVT::f32, Custom);
setOperationAction(ISD::FMINIMUM, MVT::f32, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::f32, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::f32, Custom);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
@@ -1108,6 +1112,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (auto VT : { MVT::f64, MVT::v4f32, MVT::v2f64 }) {
setOperationAction(ISD::FMAXIMUM, VT, Custom);
setOperationAction(ISD::FMINIMUM, VT, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, VT, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, VT, Custom);
}
for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
@@ -1473,6 +1479,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMAXIMUM, VT, Custom);
setOperationAction(ISD::FMINIMUM, VT, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, VT, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, VT, Custom);
setOperationAction(ISD::FCANONICALIZE, VT, Custom);
}
@@ -1818,6 +1826,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::FMAXIMUM, VT, Custom);
setOperationAction(ISD::FMINIMUM, VT, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, VT, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, VT, Custom);
setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FABS, VT, Custom);
setOperationAction(ISD::FMA, VT, Legal);
@@ -2289,6 +2299,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
setOperationAction(ISD::FMAXIMUM, MVT::f16, Custom);
setOperationAction(ISD::FMINIMUM, MVT::f16, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::f16, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::f16, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
@@ -2336,6 +2348,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMINIMUM, MVT::v32f16, Custom);
setOperationAction(ISD::FMAXIMUM, MVT::v32f16, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::v32f16, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::v32f16, Custom);
}
if (Subtarget.hasVLX()) {
@@ -2383,9 +2397,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMINIMUM, MVT::v8f16, Custom);
setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::v8f16, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::v8f16, Custom);
setOperationAction(ISD::FMINIMUM, MVT::v16f16, Custom);
setOperationAction(ISD::FMAXIMUM, MVT::v16f16, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::v16f16, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::v16f16, Custom);
}
}
@@ -2444,6 +2462,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::FMINIMUM, VT, Custom);
setOperationAction(ISD::FMAXIMUM, VT, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, VT, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, VT, Custom);
}
if (Subtarget.hasAVX10_2_512()) {
setOperationAction(ISD::FADD, MVT::v32bf16, Legal);
@@ -2455,6 +2475,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SETCC, MVT::v32bf16, Custom);
setOperationAction(ISD::FMINIMUM, MVT::v32bf16, Custom);
setOperationAction(ISD::FMAXIMUM, MVT::v32bf16, Custom);
+ setOperationAction(ISD::FMINIMUMNUM, MVT::v32bf16, Custom);
+ setOperationAction(ISD::FMAXIMUMNUM, MVT::v32bf16, Custom);
}
for (auto VT : {MVT::f16, MVT::f32, MVT::f64}) {
setCondCodeAction(ISD::SETOEQ, VT, Custom);
@@ -28839,13 +28861,15 @@ static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert((Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMINIMUM) &&
- "Expected FMAXIMUM or FMINIMUM opcode");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = Op.getValueType();
SDValue X = Op.getOperand(0);
SDValue Y = Op.getOperand(1);
SDLoc DL(Op);
+ bool IsMaxOp =
+ Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMAXIMUMNUM;
+ bool IsNum =
+ Op.getOpcode() == ISD::FMINIMUMNUM || Op.getOpcode() == ISD::FMAXIMUMNUM;
if (Subtarget.hasAVX10_2() && TLI.isTypeLegal(VT)) {
unsigned Opc = 0;
if (VT.isVector())
@@ -28855,7 +28879,7 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
if (Opc) {
SDValue Imm =
- DAG.getTargetConstant(Op.getOpcode() == ISD::FMAXIMUM, DL, MVT::i32);
+ DAG.getTargetConstant(IsMaxOp + (IsNum ? 16 : 0), DL, MVT::i32);
return DAG.getNode(Opc, DL, VT, X, Y, Imm, Op->getFlags());
}
}
@@ -28865,7 +28889,7 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
APInt OppositeZero = PreferredZero;
EVT IVT = VT.changeTypeToInteger();
X86ISD::NodeType MinMaxOp;
- if (Op.getOpcode() == ISD::FMAXIMUM) {
+ if (IsMaxOp) {
MinMaxOp = X86ISD::FMAX;
OppositeZero.setSignBit();
} else {
@@ -28995,7 +29019,9 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
if (IgnoreNaN || DAG.isKnownNeverNaN(NewX))
return MinMax;
- SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NewX, NewX, ISD::SETUO);
+ SDValue IsNaN =
+ DAG.getSetCC(DL, SetCCType, NewX, NewX, IsNum ? ISD::SETO : ISD::SETUO);
+
return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax);
}
@@ -33253,6 +33279,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::UMIN: return LowerMINMAX(Op, Subtarget, DAG);
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
+ case ISD::FMINIMUMNUM:
+ case ISD::FMAXIMUMNUM:
return LowerFMINIMUM_FMAXIMUM(Op, Subtarget, DAG);
case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
case ISD::ABDS:
@@ -45994,6 +46022,8 @@ static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
case ISD::FMAXNUM_IEEE:
case ISD::FMAXIMUM:
case ISD::FMINIMUM:
+ case ISD::FMAXIMUMNUM:
+ case ISD::FMINIMUMNUM:
case X86ISD::FMAX:
case X86ISD::FMIN:
case ISD::FABS: // Begin 1 operand
diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
new file mode 100644
index 00000000000000..b183994280a474
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
@@ -0,0 +1,2553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=AVX,AVX512,AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=AVX10_2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86
+
+declare float @llvm.maximumnum.f32(float, float)
+declare double @llvm.maximumnum.f64(double, double)
+declare float @llvm.minimumnum.f32(float, float)
+declare double @llvm.minimumnum.f64(double, double)
+declare <2 x double> @llvm.minimumnum.v2f64(<2 x double>, <2 x double>)
+declare <4 x float> @llvm.maximumnum.v4f32(<4 x float>, <4 x float>)
+declare <4 x half> @llvm.maximumnum.v4f16(<4 x half>, <4 x half>)
+declare <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat>, <4 x bfloat>)
+
+;
+; fmaximumnum
+;
+
+define float @test_fmaximumnum(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: js .LBB0_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: .LBB0_2:
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: cmpordss %xmm3, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: andps %xmm3, %xmm4
+; SSE2-NEXT: js .LBB0_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB0_4:
+; SSE2-NEXT: maxss %xmm1, %xmm3
+; SSE2-NEXT: andnps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB0_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: jmp .LBB0_3
+; AVX1-NEXT: .LBB0_1:
+; AVX1-NEXT: vmovdqa %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: .LBB0_3:
+; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vmovdqa %xmm0, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxss $17, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm2, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB0_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovdqa %xmm2, %xmm1
+; X86-NEXT: jmp .LBB0_3
+; X86-NEXT: .LBB0_1:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm2, %xmm0
+; X86-NEXT: .LBB0_3:
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximumnum.f32(float %x, float %y)
+ ret float %1
+}
+
+define <4 x float> @test_fmaximumnum_scalarize(<4 x float> %x, <4 x float> %y) "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" {
+; SSE2-LABEL: test_fmaximumnum_scalarize:
+; SSE2: # %bb.0:
+; SSE2-NEXT: maxps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_scalarize:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_scalarize:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxps $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_scalarize:
+; X86: # %bb.0:
+; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> %y)
+ ret <4 x float> %r
+}
+
+define float @test_fmaximumnum_nan0(float %x, float %y) {
+; SSE2-LABEL: test_fmaximumnum_nan0:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_nan0:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_nan0:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_nan0:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximumnum.f32(float 0x7fff000000000000, float %y)
+ ret float %1
+}
+
+define float @test_fmaximumnum_nan1(float %x, float %y) {
+; SSE2-LABEL: test_fmaximumnum_nan1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_nan1:
+; AVX: # %bb.0:
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_nan1:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_nan1:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximumnum.f32(float %x, float 0x7fff000000000000)
+ ret float %1
+}
+
+define float @test_fmaximumnum_nnan(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: addss %xmm1, %xmm2
+; SSE2-NEXT: subss %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: js .LBB4_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: maxss %xmm2, %xmm0
+; SSE2-NEXT: retq
+; SSE2-NEXT: .LBB4_1:
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: maxss %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_nnan:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB4_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmaxss %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+; AVX1-NEXT: .LBB4_1:
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512F-LABEL: test_fmaximumnum_nnan:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vmovd %xmm2, %eax
+; AVX512F-NEXT: testl %eax, %eax
+; AVX512F-NEXT: sets %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovaps %xmm2, %xmm1
+; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512F-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
+; AVX512F-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_fmaximumnum_nnan:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX512DQ-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vfpclassss $3, %xmm0, %k0 # k0 = isQuietNaN(xmm0) | isPositiveZero(xmm0)
+; AVX512DQ-NEXT: kmovw %k0, %k1
+; AVX512DQ-NEXT: vmovaps %xmm2, %xmm1
+; AVX512DQ-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512DQ-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
+; AVX512DQ-NEXT: vmaxss %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX10_2-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: vminmaxss $17, %xmm0, %xmm2
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_nnan:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: vaddss %xmm0, %xmm2, %xmm1
+; X86-NEXT: vsubss %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmovd %xmm1, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB4_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovaps %xmm1, %xmm2
+; X86-NEXT: jmp .LBB4_3
+; X86-NEXT: .LBB4_1:
+; X86-NEXT: vmovaps %xmm0, %xmm2
+; X86-NEXT: vmovaps %xmm1, %xmm0
+; X86-NEXT: .LBB4_3:
+; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = fadd nnan float %x, %y
+ %2 = fsub nnan float %x, %y
+ %3 = tail call float @llvm.maximumnum.f32(float %1, float %2)
+ ret float %3
+}
+
+define double @test_fmaximumnum_zero0(double %x, double %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_zero0:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpordsd %xmm1, %xmm0
+; SSE2-NEXT: movapd %xmm0, %xmm2
+; SSE2-NEXT: andpd %xmm1, %xmm2
+; SSE2-NEXT: xorpd %xmm3, %xmm3
+; SSE2-NEXT: maxsd %xmm3, %xmm1
+; SSE2-NEXT: andnpd %xmm1, %xmm0
+; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_zero0:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmaxsd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vcmpordsd %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_zero0:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmaxsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordsd %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_zero0:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX10_2-NEXT: vminmaxsd $17, %xmm0, %xmm1
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_zero0:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordsd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %1 = tail call double @llvm.maximumnum.f64(double 0.0, double %y)
+ ret double %1
+}
+
+define double @test_fmaximumnum_zero1(double %x, double %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_zero1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movapd %xmm0, %xmm1
+; SSE2-NEXT: cmpordsd %xmm0, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm2
+; SSE2-NEXT: andpd %xmm0, %xmm2
+; SSE2-NEXT: xorpd %xmm3, %xmm3
+; SSE2-NEXT: maxsd %xmm3, %xmm0
+; SSE2-NEXT: andnpd %xmm0, %xmm1
+; SSE2-NEXT: orpd %xmm2, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_zero1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordsd %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_zero1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vcmpordsd %xmm0, %xmm0, %k1
+; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmovapd %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_zero1:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxsd $17, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_zero1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordsd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %1 = tail call double @llvm.maximumnum.f64(double %x, double 0.0)
+ ret double %1
+}
+
+define double @test_fmaximumnum_zero2(double %x, double %y) {
+; SSE2-LABEL: test_fmaximumnum_zero2:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_zero2:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_zero2:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_zero2:
+; X86: # %bb.0:
+; X86-NEXT: fldz
+; X86-NEXT: retl
+ %1 = tail call double @llvm.maximumnum.f64(double 0.0, double -0.0)
+ ret double %1
+}
+
+define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math"="true" nounwind {
+; SSE2-LABEL: test_fmaximumnum_nsz:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: cmpordss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm0, %xmm3
+; SSE2-NEXT: maxss %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm0, %xmm2
+; SSE2-NEXT: orps %xmm3, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_nsz:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_nsz:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmovaps %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_nsz:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxss $17, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_nsz:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
+; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximumnum.f32(float %x, float %y)
+ ret float %1
+}
+
+define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_combine_cmps:
+; SSE2: # %bb.0:
+; SSE2-NEXT: divss %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: js .LBB9_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: .LBB9_2:
+; SSE2-NEXT: movaps %xmm3, %xmm2
+; SSE2-NEXT: cmpordss %xmm3, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm4
+; SSE2-NEXT: andps %xmm3, %xmm4
+; SSE2-NEXT: js .LBB9_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: .LBB9_4:
+; SSE2-NEXT: maxss %xmm1, %xmm3
+; SSE2-NEXT: andnps %xmm3, %xmm2
+; SSE2-NEXT: orps %xmm4, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_combine_cmps:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB9_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovaps %xmm0, %xmm2
+; AVX1-NEXT: jmp .LBB9_3
+; AVX1-NEXT: .LBB9_1:
+; AVX1-NEXT: vmovaps %xmm1, %xmm2
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: .LBB9_3:
+; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512F-LABEL: test_fmaximumnum_combine_cmps:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: testl %eax, %eax
+; AVX512F-NEXT: sets %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovaps %xmm0, %xmm2
+; AVX512F-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512F-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512F-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_fmaximumnum_combine_cmps:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX512DQ-NEXT: vfpclassss $3, %xmm0, %k0 # k0 = isQuietNaN(xmm0) | isPositiveZero(xmm0)
+; AVX512DQ-NEXT: kmovw %k0, %k1
+; AVX512DQ-NEXT: vmovaps %xmm1, %xmm2
+; AVX512DQ-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
+; AVX512DQ-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512DQ-NEXT: vmaxss %xmm2, %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_combine_cmps:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxss $17, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_combine_cmps:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vdivss %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovd %xmm1, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB9_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovaps %xmm1, %xmm2
+; X86-NEXT: jmp .LBB9_3
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: vmovaps %xmm0, %xmm2
+; X86-NEXT: vmovaps %xmm1, %xmm0
+; X86-NEXT: .LBB9_3:
+; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = fdiv nnan float %y, %x
+ %2 = tail call float @llvm.maximumnum.f32(float %x, float %1)
+ ret float %2
+}
+
+;
+; fminimumnum
+;
+
+define float @test_fminimumnum(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fminimumnum:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: js .LBB10_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB10_2:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: cmpordss %xmm3, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm4
+; SSE2-NEXT: andps %xmm3, %xmm4
+; SSE2-NEXT: js .LBB10_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: .LBB10_4:
+; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm3, %xmm2
+; SSE2-NEXT: orps %xmm4, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB10_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovdqa %xmm1, %xmm2
+; AVX1-NEXT: jmp .LBB10_3
+; AVX1-NEXT: .LBB10_1:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa %xmm1, %xmm0
+; AVX1-NEXT: .LBB10_3:
+; AVX1-NEXT: vminss %xmm2, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fminimumnum:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vmovaps %xmm1, %xmm2
+; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: vminss %xmm2, %xmm0, %xmm1
+; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmovaps %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxss $16, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB10_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovdqa %xmm1, %xmm2
+; X86-NEXT: jmp .LBB10_3
+; X86-NEXT: .LBB10_1:
+; X86-NEXT: vmovdqa %xmm0, %xmm2
+; X86-NEXT: vmovdqa %xmm1, %xmm0
+; X86-NEXT: .LBB10_3:
+; X86-NEXT: vminss %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = tail call float @llvm.minimumnum.f32(float %x, float %y)
+ ret float %1
+}
+
+define <2 x double> @test_fminimumnum_scalarize(<2 x double> %x, <2 x double> %y) "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" {
+; SSE2-LABEL: test_fminimumnum_scalarize:
+; SSE2: # %bb.0:
+; SSE2-NEXT: minpd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_scalarize:
+; AVX: # %bb.0:
+; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_scalarize:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_scalarize:
+; X86: # %bb.0:
+; X86-NEXT: vminpd %xmm1, %xmm0, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> %y)
+ ret <2 x double> %r
+}
+
+define float @test_fminimumnum_nan0(float %x, float %y) {
+; SSE2-LABEL: test_fminimumnum_nan0:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_nan0:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_nan0:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_nan0:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.minimumnum.f32(float 0x7fff000000000000, float %y)
+ ret float %1
+}
+
+define float @test_fminimumnum_nan1(float %x, float %y) {
+; SSE2-LABEL: test_fminimumnum_nan1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_nan1:
+; AVX: # %bb.0:
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_nan1:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_nan1:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.minimumnum.f32(float %x, float 0x7fff000000000000)
+ ret float %1
+}
+
+define double @test_fminimumnum_nnan(double %x, double %y) "no-nans-fp-math"="true" nounwind {
+; SSE2-LABEL: test_fminimumnum_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: js .LBB14_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: minsd %xmm1, %xmm0
+; SSE2-NEXT: retq
+; SSE2-NEXT: .LBB14_1:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: minsd %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum_nnan:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: testq %rax, %rax
+; AVX1-NEXT: js .LBB14_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vminsd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+; AVX1-NEXT: .LBB14_1:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: vminsd %xmm2, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512F-LABEL: test_fminimumnum_nnan:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: testq %rax, %rax
+; AVX512F-NEXT: sets %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovapd %xmm1, %xmm2
+; AVX512F-NEXT: vmovsd %xmm0, %xmm2, %xmm2 {%k1}
+; AVX512F-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512F-NEXT: vminsd %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_fminimumnum_nnan:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vfpclasssd $5, %xmm1, %k0 # k0 = isQuietNaN(xmm1) | isNegativeZero(xmm1)
+; AVX512DQ-NEXT: kmovw %k0, %k1
+; AVX512DQ-NEXT: vmovapd %xmm0, %xmm2
+; AVX512DQ-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512DQ-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512DQ-NEXT: vminsd %xmm2, %xmm1, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxsd $16, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_nnan:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vextractps $1, %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB14_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovapd %xmm1, %xmm2
+; X86-NEXT: jmp .LBB14_3
+; X86-NEXT: .LBB14_1:
+; X86-NEXT: vmovapd %xmm0, %xmm2
+; X86-NEXT: vmovapd %xmm1, %xmm0
+; X86-NEXT: .LBB14_3:
+; X86-NEXT: vminsd %xmm2, %xmm0, %xmm0
+; X86-NEXT: vmovsd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %1 = tail call double @llvm.minimumnum.f64(double %x, double %y)
+ ret double %1
+}
+
+define double @test_fminimumnum_zero0(double %x, double %y) nounwind {
+; SSE2-LABEL: test_fminimumnum_zero0:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpordsd %xmm1, %xmm0
+; SSE2-NEXT: movapd %xmm0, %xmm2
+; SSE2-NEXT: andpd %xmm1, %xmm2
+; SSE2-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: andnpd %xmm1, %xmm0
+; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum_zero0:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vcmpordsd %xmm1, %xmm1, %xmm0
+; AVX1-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
+; AVX1-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fminimumnum_zero0:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcmpordsd %xmm1, %xmm1, %k1
+; AVX512-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_zero0:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxsd $16, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_zero0:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vcmpordsd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %1 = tail call double @llvm.minimumnum.f64(double -0.0, double %y)
+ ret double %1
+}
+
+define double @test_fminimumnum_zero1(double %x, double %y) nounwind {
+; SSE2-LABEL: test_fminimumnum_zero1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movapd %xmm0, %xmm1
+; SSE2-NEXT: cmpordsd %xmm0, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm2
+; SSE2-NEXT: andpd %xmm0, %xmm2
+; SSE2-NEXT: minsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: andnpd %xmm0, %xmm1
+; SSE2-NEXT: orpd %xmm2, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum_zero1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vcmpordsd %xmm0, %xmm0, %xmm1
+; AVX1-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fminimumnum_zero1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcmpordsd %xmm0, %xmm0, %k1
+; AVX512-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmovapd %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_zero1:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxsd $16, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_zero1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vcmpordsd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vminsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmovlpd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %1 = tail call double @llvm.minimumnum.f64(double %x, double -0.0)
+ ret double %1
+}
+
+define double @test_fminimumnum_zero2(double %x, double %y) {
+; SSE2-LABEL: test_fminimumnum_zero2:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_zero2:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_zero2:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_zero2:
+; X86: # %bb.0:
+; X86-NEXT: fldz
+; X86-NEXT: fchs
+; X86-NEXT: retl
+ %1 = tail call double @llvm.minimumnum.f64(double -0.0, double 0.0)
+ ret double %1
+}
+
+define float @test_fminimumnum_nsz(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fminimumnum_nsz:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: cmpordss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm0, %xmm3
+; SSE2-NEXT: minss %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm0, %xmm2
+; SSE2-NEXT: orps %xmm3, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum_nsz:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fminimumnum_nsz:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vminss %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmovaps %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_nsz:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxss $16, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_nsz:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
+; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = tail call nsz float @llvm.minimumnum.f32(float %x, float %y)
+ ret float %1
+}
+
+define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fminimumnum_combine_cmps:
+; SSE2: # %bb.0:
+; SSE2-NEXT: divss %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: js .LBB19_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: .LBB19_2:
+; SSE2-NEXT: movaps %xmm3, %xmm2
+; SSE2-NEXT: cmpordss %xmm3, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm4
+; SSE2-NEXT: andps %xmm3, %xmm4
+; SSE2-NEXT: js .LBB19_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: .LBB19_4:
+; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm3, %xmm2
+; SSE2-NEXT: orps %xmm4, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fminimumnum_combine_cmps:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vdivss %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB19_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovaps %xmm2, %xmm1
+; AVX1-NEXT: jmp .LBB19_3
+; AVX1-NEXT: .LBB19_1:
+; AVX1-NEXT: vmovaps %xmm0, %xmm1
+; AVX1-NEXT: vmovaps %xmm2, %xmm0
+; AVX1-NEXT: .LBB19_3:
+; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512F-LABEL: test_fminimumnum_combine_cmps:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX512F-NEXT: vmovd %xmm0, %eax
+; AVX512F-NEXT: testl %eax, %eax
+; AVX512F-NEXT: sets %al
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vmovaps %xmm1, %xmm2
+; AVX512F-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
+; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512F-NEXT: vminss %xmm2, %xmm0, %xmm1
+; AVX512F-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512F-NEXT: vmovaps %xmm1, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_fminimumnum_combine_cmps:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX512DQ-NEXT: vfpclassss $5, %xmm0, %k0 # k0 = isQuietNaN(xmm0) | isNegativeZero(xmm0)
+; AVX512DQ-NEXT: kmovw %k0, %k1
+; AVX512DQ-NEXT: vmovaps %xmm1, %xmm2
+; AVX512DQ-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
+; AVX512DQ-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512DQ-NEXT: vminss %xmm2, %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_combine_cmps:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxss $16, %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_combine_cmps:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vdivss %xmm0, %xmm1, %xmm2
+; X86-NEXT: vmovd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB19_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovaps %xmm2, %xmm1
+; X86-NEXT: jmp .LBB19_3
+; X86-NEXT: .LBB19_1:
+; X86-NEXT: vmovaps %xmm0, %xmm1
+; X86-NEXT: vmovaps %xmm2, %xmm0
+; X86-NEXT: .LBB19_3:
+; X86-NEXT: vminss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
+ %1 = fdiv nnan float %y, %x
+ %2 = tail call float @llvm.minimumnum.f32(float %x, float %1)
+ ret float %2
+}
+
+define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
+; SSE2-LABEL: test_fminimumnum_vector:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[3,3]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: minpd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: cmpordpd %xmm3, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: andnpd %xmm1, %xmm0
+; SSE2-NEXT: orpd %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector:
+; X86: # %bb.0:
+; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
+; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
+; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> %y)
+ ret <2 x double> %r
+}
+
+define <4 x float> @test_fmaximumnum_vector(<4 x float> %x, <4 x float> %y) "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" {
+; SSE2-LABEL: test_fmaximumnum_vector:
+; SSE2: # %bb.0:
+; SSE2-NEXT: maxps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_vector:
+; AVX: # %bb.0:
+; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_vector:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxps $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_vector:
+; X86: # %bb.0:
+; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> %y)
+ ret <4 x float> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: minpd %xmm0, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_zero:
+; X86: # %bb.0:
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0.>)
+ ret <2 x double> %r
+}
+
+define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
+; SSE2-LABEL: test_fmaximumnum_vector_signed_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; SSE2-NEXT: maxps %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_vector_signed_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxps $17, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_vector_signed_zero:
+; X86: # %bb.0:
+; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float -0., float -0., float -0., float -0.>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_partially_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: minpd %xmm0, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_partially_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_partially_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_partially_zero:
+; X86: # %bb.0:
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 5.>)
+ ret <2 x double> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_different_zeros:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[3,3]
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: movhps {{.*#+}} xmm2 = xmm2[0,1],mem[0,1]
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: andps %xmm3, %xmm4
+; SSE2-NEXT: orps %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: minpd %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: cmpordpd %xmm3, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: andnpd %xmm1, %xmm0
+; SSE2-NEXT: orpd %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_different_zeros:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
+; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_different_zeros:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_different_zeros:
+; X86: # %bb.0:
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
+; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
+; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double -0.>)
+ ret <2 x double> %r
+}
+
+define <4 x float> @test_fmaximumnum_vector_non_zero(<4 x float> %x) {
+; SSE2-LABEL: test_fmaximumnum_vector_non_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [5.0E+0,4.0E+0,3.0E+0,2.0E+0]
+; SSE2-NEXT: maxps %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_vector_non_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [5.0E+0,4.0E+0,3.0E+0,2.0E+0]
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_vector_non_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxps $17, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_vector_non_zero:
+; X86: # %bb.0:
+; X86-NEXT: vmovaps {{.*#+}} xmm1 = [5.0E+0,4.0E+0,3.0E+0,2.0E+0]
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float 5., float 4., float 3., float 2.>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_nan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm2, %xmm2
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: minpd %xmm0, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_nan:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm1[0],mem[0]
+; AVX-NEXT: vminpd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_nan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_nan:
+; X86: # %bb.0:
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpordpd %xmm1, %xmm1, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0x7fff000000000000>)
+ ret <2 x double> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_zero_first:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: minpd %xmm0, %xmm1
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_zero_first:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_zero_first:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxpd $16, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_zero_first:
+; X86: # %bb.0:
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double 0., double 0.>, <2 x double> %x)
+ ret <2 x double> %r
+}
+
+define <2 x double> @test_fminimumnum_vector_signed_zero(<2 x double> %x) {
+; SSE2-LABEL: test_fminimumnum_vector_signed_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movapd %xmm0, %xmm1
+; SSE2-NEXT: minpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: movapd %xmm0, %xmm2
+; SSE2-NEXT: cmpordpd %xmm0, %xmm2
+; SSE2-NEXT: andpd %xmm2, %xmm0
+; SSE2-NEXT: andnpd %xmm1, %xmm2
+; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_vector_signed_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vminpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_vector_signed_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxpd $16, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_vector_signed_zero:
+; X86: # %bb.0:
+; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vminpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: retl
+ %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double -0., double -0.>)
+ ret <2 x double> %r
+}
+
+define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) {
+; SSE2-LABEL: test_fmaximumnum_vector_signed_zero_first:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; SSE2-NEXT: maxps %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_vector_signed_zero_first:
+; AVX: # %bb.0:
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero_first:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxps $17, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_vector_signed_zero_first:
+; X86: # %bb.0:
+; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> <float -0., float -0., float -0., float -0.>, <4 x float> %x)
+ ret <4 x float> %r
+}
+
+define <4 x float> @test_fmaximumnum_vector_zero(<4 x float> %x) {
+; SSE2-LABEL: test_fmaximumnum_vector_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: maxps %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: cmpordps %xmm0, %xmm1
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_vector_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_vector_zero:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxps $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_vector_zero:
+; X86: # %bb.0:
+; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float 0., float 0., float 0., float 0.>)
+ ret <4 x float> %r
+}
+
+; PR77805: Check that signed zeroes are handled correctly in this case (FIXME)
+define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
+; SSE2-LABEL: test_fmaximumnum_v4f32_splat:
+; SSE2: # %bb.0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: movaps %xmm1, %xmm4
+; SSE2-NEXT: andps %xmm2, %xmm4
+; SSE2-NEXT: orps %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: maxps %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: cmpordps %xmm0, %xmm2
+; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: andnps %xmm1, %xmm2
+; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_v4f32_splat:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmaxps %xmm2, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_v4f32_splat:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vbroadcastss %xmm1, %xmm1
+; AVX512-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vmaxps %xmm2, %xmm0, %xmm1
+; AVX512-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_v4f32_splat:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vbroadcastss %xmm1, %xmm1
+; AVX10_2-NEXT: vminmaxps $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_v4f32_splat:
+; X86: # %bb.0:
+; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmaxps %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: retl
+ %splatinsert = insertelement <4 x float> poison, float %y, i64 0
+ %vec = shufflevector <4 x float> %splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> %vec) readnone
+ ret <4 x float> %r
+}
+
+define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_v4f16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: subq $104, %rsp
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: js .LBB33_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: .LBB33_2:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: cmpordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andps %xmm2, %xmm3
+; SSE2-NEXT: js .LBB33_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: .LBB33_4:
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: callq __truncsfhf2@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: js .LBB33_6
+; SSE2-NEXT: # %bb.5:
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: .LBB33_6:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: cmpordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andps %xmm2, %xmm3
+; SSE2-NEXT: js .LBB33_8
+; SSE2-NEXT: # %bb.7:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: .LBB33_8:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm1
+; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm1
+; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: callq __truncsfhf2@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: js .LBB33_10
+; SSE2-NEXT: # %bb.9:
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: .LBB33_10:
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: cmpordss %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: andps %xmm2, %xmm3
+; SSE2-NEXT: js .LBB33_12
+; SSE2-NEXT: # %bb.11:
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: .LBB33_12:
+; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm3, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: callq __truncsfhf2@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: movd (%rsp), %xmm4 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: testl %eax, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: js .LBB33_14
+; SSE2-NEXT: # %bb.13:
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: .LBB33_14:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: cmpordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andps %xmm2, %xmm3
+; SSE2-NEXT: js .LBB33_16
+; SSE2-NEXT: # %bb.15:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: .LBB33_16:
+; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: callq __truncsfhf2@PLT
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: addq $104, %rsp
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_v4f16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $120, %rsp
+; AVX1-NEXT: vmovaps %xmm0, %xmm2
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vpsrld $16, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm0
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB33_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT: jmp .LBB33_3
+; AVX1-NEXT: .LBB33_1:
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: .LBB33_3:
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: callq __truncsfhf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB33_4
+; AVX1-NEXT: # %bb.5:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa (%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT: jmp .LBB33_6
+; AVX1-NEXT: .LBB33_4:
+; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: .LBB33_6:
+; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: callq __truncsfhf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB33_7
+; AVX1-NEXT: # %bb.8:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa (%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT: jmp .LBB33_9
+; AVX1-NEXT: .LBB33_7:
+; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: .LBB33_9:
+; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: callq __truncsfhf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: js .LBB33_10
+; AVX1-NEXT: # %bb.11:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-NEXT: jmp .LBB33_12
+; AVX1-NEXT: .LBB33_10:
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vmovdqa %xmm0, %xmm2
+; AVX1-NEXT: .LBB33_12:
+; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: callq __truncsfhf2@PLT
+; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX1-NEXT: addq $120, %rsp
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_v4f16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: vmovdqa %xmm2, %xmm4
+; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
+; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2
+; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1
+; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2
+; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovdqa %xmm3, %xmm5
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
+; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovdqa %xmm3, %xmm5
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
+; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3
+; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vshufpd {{.*#+}} xmm5 = xmm1[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: vmovdqa %xmm4, %xmm6
+; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
+; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[3,3,3,3,4,5,6,7]
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovdqa %xmm3, %xmm5
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
+; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: vmovdqa %xmm4, %xmm6
+; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
+; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm5
+; AVX512-NEXT: vmovdqa %xmm4, %xmm6
+; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
+; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
+; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm0, %xmm5
+; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_v4f16:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxph $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_v4f16:
+; X86: # %bb.0:
+; X86-NEXT: subl $164, %esp
+; X86-NEXT: vmovdqa %xmm0, %xmm2
+; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vpsrlq $48, %xmm0, %xmm0
+; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovshdup {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vpsrlq $48, %xmm1, %xmm0
+; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vpsrld $16, %xmm2, %xmm0
+; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vpsrld $16, %xmm1, %xmm0
+; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vpextrw $0, %xmm1, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm2, %eax
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB33_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovdqa %xmm2, %xmm1
+; X86-NEXT: jmp .LBB33_3
+; X86-NEXT: .LBB33_1:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm2, %xmm0
+; X86-NEXT: .LBB33_3:
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm1, %eax
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB33_4
+; X86-NEXT: # %bb.5:
+; X86-NEXT: vmovdqa %xmm1, %xmm2
+; X86-NEXT: jmp .LBB33_6
+; X86-NEXT: .LBB33_4:
+; X86-NEXT: vmovdqa %xmm0, %xmm2
+; X86-NEXT: vmovdqa %xmm1, %xmm0
+; X86-NEXT: .LBB33_6:
+; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vpextrw $0, %xmm0, (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm1, %eax
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB33_7
+; X86-NEXT: # %bb.8:
+; X86-NEXT: vmovdqa %xmm1, %xmm2
+; X86-NEXT: jmp .LBB33_9
+; X86-NEXT: .LBB33_7:
+; X86-NEXT: vmovdqa %xmm0, %xmm2
+; X86-NEXT: vmovdqa %xmm1, %xmm0
+; X86-NEXT: .LBB33_9:
+; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vmovd %xmm1, %eax
+; X86-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: js .LBB33_10
+; X86-NEXT: # %bb.11:
+; X86-NEXT: vmovdqa %xmm1, %xmm2
+; X86-NEXT: jmp .LBB33_12
+; X86-NEXT: .LBB33_10:
+; X86-NEXT: vmovdqa %xmm0, %xmm2
+; X86-NEXT: vmovdqa %xmm1, %xmm0
+; X86-NEXT: .LBB33_12:
+; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovd %xmm0, (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-NEXT: vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; X86-NEXT: addl $164, %esp
+; X86-NEXT: retl
+ %r = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %x, <4 x half> %y)
+ ret <4 x half> %r
+}
+
+define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_v4bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: subq $56, %rsp
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pextrw $0, %xmm1, %r14d
+; SSE2-NEXT: pextrw $0, %xmm0, %r15d
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pextrw $0, %xmm0, %ecx
+; SSE2-NEXT: shll $16, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: shll $16, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: testl %ecx, %ecx
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: js .LBB34_2
+; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_2:
+; SSE2-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; SSE2-NEXT: movdqa %xmm5, (%rsp) # 16-byte Spill
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: cmpordss %xmm1, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm6
+; SSE2-NEXT: andps %xmm1, %xmm6
+; SSE2-NEXT: js .LBB34_4
+; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: .LBB34_4:
+; SSE2-NEXT: pextrw $0, %xmm4, %ebp
+; SSE2-NEXT: pextrw $0, %xmm5, %ebx
+; SSE2-NEXT: maxss %xmm2, %xmm1
+; SSE2-NEXT: andnps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm6, %xmm0
+; SSE2-NEXT: callq __truncsfbf2@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: shll $16, %r15d
+; SSE2-NEXT: movd %r15d, %xmm3
+; SSE2-NEXT: shll $16, %r14d
+; SSE2-NEXT: movd %r14d, %xmm2
+; SSE2-NEXT: testl %r15d, %r15d
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: js .LBB34_6
+; SSE2-NEXT: # %bb.5:
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_6:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm5
+; SSE2-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm6
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: cmpordss %xmm1, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: andps %xmm1, %xmm4
+; SSE2-NEXT: js .LBB34_8
+; SSE2-NEXT: # %bb.7:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: .LBB34_8:
+; SSE2-NEXT: pextrw $0, %xmm5, %r15d
+; SSE2-NEXT: pextrw $0, %xmm6, %r14d
+; SSE2-NEXT: maxss %xmm2, %xmm1
+; SSE2-NEXT: andnps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: callq __truncsfbf2@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: shll $16, %ebx
+; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: shll $16, %ebp
+; SSE2-NEXT: movd %ebp, %xmm3
+; SSE2-NEXT: testl %ebx, %ebx
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: js .LBB34_10
+; SSE2-NEXT: # %bb.9:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: .LBB34_10:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: cmpordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: andps %xmm2, %xmm4
+; SSE2-NEXT: js .LBB34_12
+; SSE2-NEXT: # %bb.11:
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: .LBB34_12:
+; SSE2-NEXT: maxss %xmm3, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: callq __truncsfbf2@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: shll $16, %r14d
+; SSE2-NEXT: movd %r14d, %xmm1
+; SSE2-NEXT: shll $16, %r15d
+; SSE2-NEXT: movd %r15d, %xmm3
+; SSE2-NEXT: testl %r14d, %r14d
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: js .LBB34_14
+; SSE2-NEXT: # %bb.13:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: .LBB34_14:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: cmpordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: andps %xmm2, %xmm4
+; SSE2-NEXT: js .LBB34_16
+; SSE2-NEXT: # %bb.15:
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: .LBB34_16:
+; SSE2-NEXT: maxss %xmm3, %xmm2
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: callq __truncsfbf2@PLT
+; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: addq $56, %rsp
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; AVX1-LABEL: test_fmaximumnum_v4bf16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: subq $56, %rsp
+; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq $48, %xmm1, %xmm3
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpextrw $0, %xmm4, %ebx
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpextrw $0, %xmm4, %ebp
+; AVX1-NEXT: vpextrw $0, %xmm0, %r12d
+; AVX1-NEXT: vpextrw $0, %xmm1, %r13d
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm0, %eax
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: shll $16, %eax
+; AVX1-NEXT: vmovd %eax, %xmm4
+; AVX1-NEXT: js .LBB34_1
+; AVX1-NEXT: # %bb.2:
+; AVX1-NEXT: vmovdqa %xmm4, %xmm1
+; AVX1-NEXT: jmp .LBB34_3
+; AVX1-NEXT: .LBB34_1:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm4, %xmm0
+; AVX1-NEXT: .LBB34_3:
+; AVX1-NEXT: vpextrw $0, %xmm2, %r14d
+; AVX1-NEXT: vpextrw $0, %xmm3, %r15d
+; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: callq __truncsfbf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: shll $16, %r13d
+; AVX1-NEXT: vmovd %r13d, %xmm0
+; AVX1-NEXT: shll $16, %r12d
+; AVX1-NEXT: vmovd %r12d, %xmm2
+; AVX1-NEXT: js .LBB34_4
+; AVX1-NEXT: # %bb.5:
+; AVX1-NEXT: vmovdqa %xmm2, %xmm1
+; AVX1-NEXT: jmp .LBB34_6
+; AVX1-NEXT: .LBB34_4:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm2, %xmm0
+; AVX1-NEXT: .LBB34_6:
+; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: callq __truncsfbf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: shll $16, %ebp
+; AVX1-NEXT: vmovd %ebp, %xmm0
+; AVX1-NEXT: shll $16, %ebx
+; AVX1-NEXT: vmovd %ebx, %xmm2
+; AVX1-NEXT: js .LBB34_7
+; AVX1-NEXT: # %bb.8:
+; AVX1-NEXT: vmovdqa %xmm2, %xmm1
+; AVX1-NEXT: jmp .LBB34_9
+; AVX1-NEXT: .LBB34_7:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm2, %xmm0
+; AVX1-NEXT: .LBB34_9:
+; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: callq __truncsfbf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: shll $16, %r15d
+; AVX1-NEXT: vmovd %r15d, %xmm0
+; AVX1-NEXT: shll $16, %r14d
+; AVX1-NEXT: vmovd %r14d, %xmm2
+; AVX1-NEXT: js .LBB34_10
+; AVX1-NEXT: # %bb.11:
+; AVX1-NEXT: vmovdqa %xmm2, %xmm1
+; AVX1-NEXT: jmp .LBB34_12
+; AVX1-NEXT: .LBB34_10:
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm2, %xmm0
+; AVX1-NEXT: .LBB34_12:
+; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: callq __truncsfbf2@PLT
+; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX1-NEXT: addq $56, %rsp
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: test_fmaximumnum_v4bf16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: pushq %rax
+; AVX512-NEXT: vmovq %xmm1, %r13
+; AVX512-NEXT: movq %r13, %rbx
+; AVX512-NEXT: shrq $32, %rbx
+; AVX512-NEXT: vmovq %xmm0, %rbp
+; AVX512-NEXT: movq %rbp, %r14
+; AVX512-NEXT: shrq $32, %r14
+; AVX512-NEXT: movq %r13, %r15
+; AVX512-NEXT: shrq $48, %r15
+; AVX512-NEXT: movq %rbp, %r12
+; AVX512-NEXT: shrq $48, %r12
+; AVX512-NEXT: movl %ebp, %eax
+; AVX512-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; AVX512-NEXT: sets %cl
+; AVX512-NEXT: kmovw %ecx, %k1
+; AVX512-NEXT: movl %r13d, %ecx
+; AVX512-NEXT: andl $-65536, %ecx # imm = 0xFFFF0000
+; AVX512-NEXT: vmovd %ecx, %xmm1
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: callq __truncsfbf2@PLT
+; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: shll $16, %ebp
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: shll $16, %r13d
+; AVX512-NEXT: vmovd %r13d, %xmm1
+; AVX512-NEXT: vmovd %ebp, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: callq __truncsfbf2@PLT
+; AVX512-NEXT: vpextrw $0, %xmm0, (%rsp)
+; AVX512-NEXT: shll $16, %r12d
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: shll $16, %r15d
+; AVX512-NEXT: vmovd %r15d, %xmm1
+; AVX512-NEXT: vmovd %r12d, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: callq __truncsfbf2@PLT
+; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: shll $16, %r14d
+; AVX512-NEXT: sets %al
+; AVX512-NEXT: kmovw %eax, %k1
+; AVX512-NEXT: shll $16, %ebx
+; AVX512-NEXT: vmovd %ebx, %xmm1
+; AVX512-NEXT: vmovd %r14d, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
+; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
+; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; AVX512-NEXT: callq __truncsfbf2@PLT
+; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovaps (%rsp), %xmm0
+; AVX512-NEXT: addq $8, %rsp
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_v4bf16:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vminmaxnepbf16 $17, %xmm1, %xmm0, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_v4bf16:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $68, %esp
+; X86-NEXT: vpsrlq $48, %xmm0, %xmm2
+; X86-NEXT: vpsrlq $48, %xmm1, %xmm3
+; X86-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; X86-NEXT: vpextrw $0, %xmm4, %esi
+; X86-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; X86-NEXT: vpextrw $0, %xmm4, %ebx
+; X86-NEXT: vpextrw $0, %xmm0, %eax
+; X86-NEXT: vpextrw $0, %xmm1, %ecx
+; X86-NEXT: vpsrld $16, %xmm0, %xmm0
+; X86-NEXT: vpextrw $0, %xmm0, %edx
+; X86-NEXT: vpsrld $16, %xmm1, %xmm0
+; X86-NEXT: vpextrw $0, %xmm0, %edi
+; X86-NEXT: shll $16, %edi
+; X86-NEXT: vmovd %edi, %xmm0
+; X86-NEXT: shll $16, %edx
+; X86-NEXT: vmovd %edx, %xmm4
+; X86-NEXT: js .LBB34_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: vmovdqa %xmm4, %xmm1
+; X86-NEXT: jmp .LBB34_3
+; X86-NEXT: .LBB34_1:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm4, %xmm0
+; X86-NEXT: .LBB34_3:
+; X86-NEXT: vpextrw $0, %xmm2, %edi
+; X86-NEXT: vpextrw $0, %xmm3, %ebp
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: vmovd %ecx, %xmm0
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: vmovd %eax, %xmm2
+; X86-NEXT: js .LBB34_4
+; X86-NEXT: # %bb.5:
+; X86-NEXT: vmovdqa %xmm2, %xmm1
+; X86-NEXT: jmp .LBB34_6
+; X86-NEXT: .LBB34_4:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm2, %xmm0
+; X86-NEXT: .LBB34_6:
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __truncsfbf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: shll $16, %ebx
+; X86-NEXT: vmovd %ebx, %xmm0
+; X86-NEXT: shll $16, %esi
+; X86-NEXT: vmovd %esi, %xmm2
+; X86-NEXT: js .LBB34_7
+; X86-NEXT: # %bb.8:
+; X86-NEXT: vmovdqa %xmm2, %xmm1
+; X86-NEXT: jmp .LBB34_9
+; X86-NEXT: .LBB34_7:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm2, %xmm0
+; X86-NEXT: .LBB34_9:
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __truncsfbf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: shll $16, %ebp
+; X86-NEXT: vmovd %ebp, %xmm0
+; X86-NEXT: shll $16, %edi
+; X86-NEXT: vmovd %edi, %xmm2
+; X86-NEXT: js .LBB34_10
+; X86-NEXT: # %bb.11:
+; X86-NEXT: vmovdqa %xmm2, %xmm1
+; X86-NEXT: jmp .LBB34_12
+; X86-NEXT: .LBB34_10:
+; X86-NEXT: vmovdqa %xmm0, %xmm1
+; X86-NEXT: vmovdqa %xmm2, %xmm0
+; X86-NEXT: .LBB34_12:
+; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
+; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: calll __truncsfbf2
+; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: vmovd %xmm0, (%esp)
+; X86-NEXT: calll __truncsfbf2
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-NEXT: vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; X86-NEXT: addl $68, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+ %r = call <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat> %x, <4 x bfloat> %y)
+ ret <4 x bfloat> %r
+}
>From 3760fc61e859dd8309beb3d6ac0a77a5377f6ce0 Mon Sep 17 00:00:00 2001
From: "Wang, Phoebe" <phoebe.wang at intel.com>
Date: Thu, 2 Jan 2025 21:13:12 +0800
Subject: [PATCH 2/2] Regenerate AArch64 and AMDGPU tests
---
.../AArch64/fp-maximumnum-minimumnum.ll | 1632 +++++++++++++----
llvm/test/CodeGen/AMDGPU/maximumnum.ll | 752 +++++---
llvm/test/CodeGen/AMDGPU/minimumnum.ll | 752 +++++---
3 files changed, 2270 insertions(+), 866 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
index bb3f9a3e52a16b..2f75cc5afd8cf9 100644
--- a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
+++ b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
@@ -15,7 +15,11 @@ entry:
define <2 x double> @max_nnan_v2f64(<2 x double> %a, <2 x double> %b) {
; AARCH64-LABEL: max_nnan_v2f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: mov d2, v1.d[1]
+; AARCH64-NEXT: mov d3, v0.d[1]
+; AARCH64-NEXT: fmaxnm d0, d0, d1
+; AARCH64-NEXT: fmaxnm d2, d3, d2
+; AARCH64-NEXT: mov v0.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
@@ -25,20 +29,9 @@ entry:
define <3 x double> @max_nnan_v3f64(<3 x double> %a, <3 x double> %b) {
; AARCH64-LABEL: max_nnan_v3f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
-; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
-; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
-; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT: mov v3.d[1], v4.d[0]
-; AARCH64-NEXT: mov v0.d[1], v1.d[0]
-; AARCH64-NEXT: fmaxnm v2.2d, v2.2d, v5.2d
-; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v3.2d
-; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
-; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT: fmaxnm d0, d0, d3
+; AARCH64-NEXT: fmaxnm d1, d1, d4
+; AARCH64-NEXT: fmaxnm d2, d2, d5
; AARCH64-NEXT: ret
entry:
%c = call nnan <3 x double> @llvm.maximumnum.v3f64(<3 x double> %a, <3 x double> %b)
@@ -48,8 +41,16 @@ entry:
define <4 x double> @max_nnan_v4f64(<4 x double> %a, <4 x double> %b) {
; AARCH64-LABEL: max_nnan_v4f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v1.2d, v1.2d, v3.2d
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT: mov d4, v2.d[1]
+; AARCH64-NEXT: mov d5, v0.d[1]
+; AARCH64-NEXT: mov d6, v3.d[1]
+; AARCH64-NEXT: mov d7, v1.d[1]
+; AARCH64-NEXT: fmaxnm d0, d0, d2
+; AARCH64-NEXT: fmaxnm d1, d1, d3
+; AARCH64-NEXT: fmaxnm d4, d5, d4
+; AARCH64-NEXT: fmaxnm d2, d7, d6
+; AARCH64-NEXT: mov v0.d[1], v4.d[0]
+; AARCH64-NEXT: mov v1.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
@@ -70,7 +71,14 @@ entry:
define <2 x float> @max_nnan_v2f32(<2 x float> %a, <2 x float> %b) {
; AARCH64-LABEL: max_nnan_v2f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fmaxnm s2, s3, s2
+; AARCH64-NEXT: mov v0.s[1], v2.s[0]
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x float> @llvm.maximumnum.v2f32(<2 x float> %a, <2 x float> %b)
@@ -80,7 +88,20 @@ entry:
define <3 x float> @max_nnan_v3f32(<3 x float> %a, <3 x float> %b) {
; AARCH64-LABEL: max_nnan_v3f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s3, v1.s[1]
+; AARCH64-NEXT: mov s4, v0.s[1]
+; AARCH64-NEXT: fmaxnm s2, s0, s1
+; AARCH64-NEXT: mov s5, v0.s[2]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fmaxnm s3, s4, s3
+; AARCH64-NEXT: mov s4, v1.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fmaxnm s3, s5, s4
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <3 x float> @llvm.maximumnum.v3f32(<3 x float> %a, <3 x float> %b)
@@ -90,7 +111,20 @@ entry:
define <4 x float> @max_nnan_v4f32(<4 x float> %a, <4 x float> %b) {
; AARCH64-LABEL: max_nnan_v4f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s3, v1.s[1]
+; AARCH64-NEXT: mov s4, v0.s[1]
+; AARCH64-NEXT: fmaxnm s2, s0, s1
+; AARCH64-NEXT: mov s5, v0.s[2]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fmaxnm s3, s4, s3
+; AARCH64-NEXT: mov s4, v1.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fmaxnm s3, s5, s4
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
@@ -100,29 +134,13 @@ entry:
define <5 x float> @max_nnan_v5f32(<5 x float> %a, <5 x float> %b) {
; AARCH64-LABEL: max_nnan_v5f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
-; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
-; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
-; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
-; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
-; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
-; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
-; AARCH64-NEXT: mov x8, sp
-; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
-; AARCH64-NEXT: mov v0.s[1], v1.s[0]
-; AARCH64-NEXT: mov v5.s[1], v6.s[0]
-; AARCH64-NEXT: mov v0.s[2], v2.s[0]
-; AARCH64-NEXT: mov v5.s[2], v7.s[0]
-; AARCH64-NEXT: ldr s2, [sp, #8]
-; AARCH64-NEXT: fmaxnm v4.4s, v4.4s, v2.4s
-; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
-; AARCH64-NEXT: mov v0.s[3], v3.s[0]
-; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v5.4s
-; AARCH64-NEXT: mov s1, v0.s[1]
-; AARCH64-NEXT: mov s2, v0.s[2]
-; AARCH64-NEXT: mov s3, v0.s[3]
-; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ldr s16, [sp, #8]
+; AARCH64-NEXT: ldr s17, [sp]
+; AARCH64-NEXT: fmaxnm s0, s0, s5
+; AARCH64-NEXT: fmaxnm s1, s1, s6
+; AARCH64-NEXT: fmaxnm s2, s2, s7
+; AARCH64-NEXT: fmaxnm s3, s3, s17
+; AARCH64-NEXT: fmaxnm s4, s4, s16
; AARCH64-NEXT: ret
entry:
%c = call nnan <5 x float> @llvm.maximumnum.v5f32(<5 x float> %a, <5 x float> %b)
@@ -132,8 +150,34 @@ entry:
define <8 x float> @max_nnan_v8f32(<8 x float> %a, <8 x float> %b) {
; AARCH64-LABEL: max_nnan_v8f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v1.4s, v1.4s, v3.4s
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT: mov s4, v2.s[1]
+; AARCH64-NEXT: mov s5, v0.s[1]
+; AARCH64-NEXT: mov s6, v3.s[1]
+; AARCH64-NEXT: mov s7, v1.s[1]
+; AARCH64-NEXT: mov s16, v2.s[2]
+; AARCH64-NEXT: mov s17, v0.s[2]
+; AARCH64-NEXT: mov s18, v3.s[2]
+; AARCH64-NEXT: mov s19, v1.s[2]
+; AARCH64-NEXT: fmaxnm s20, s5, s4
+; AARCH64-NEXT: fmaxnm s4, s0, s2
+; AARCH64-NEXT: fmaxnm s5, s1, s3
+; AARCH64-NEXT: fmaxnm s6, s7, s6
+; AARCH64-NEXT: fmaxnm s7, s17, s16
+; AARCH64-NEXT: mov s2, v2.s[3]
+; AARCH64-NEXT: fmaxnm s16, s19, s18
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: mov s3, v3.s[3]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v4.s[1], v20.s[0]
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: fmaxnm s0, s0, s2
+; AARCH64-NEXT: fmaxnm s1, s1, s3
+; AARCH64-NEXT: mov v4.s[2], v7.s[0]
+; AARCH64-NEXT: mov v5.s[2], v16.s[0]
+; AARCH64-NEXT: mov v4.s[3], v0.s[0]
+; AARCH64-NEXT: mov v5.s[3], v1.s[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
@@ -154,7 +198,22 @@ entry:
define <2 x half> @max_nnan_v2f16(<2 x half> %a, <2 x half> %b) {
; AARCH64-LABEL: max_nnan_v2f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fmaxnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v0.h[2]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fmaxnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fmaxnm h3, h5, h4
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -164,7 +223,22 @@ entry:
define <4 x half> @max_nnan_v4f16(<4 x half> %a, <4 x half> %b) {
; AARCH64-LABEL: max_nnan_v4f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fmaxnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v0.h[2]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fmaxnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fmaxnm h3, h5, h4
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x half> @llvm.maximumnum.v4f16(<4 x half> %a, <4 x half> %b)
@@ -174,7 +248,36 @@ entry:
define <8 x half> @max_nnan_v8f16(<8 x half> %a, <8 x half> %b) {
; AARCH64-LABEL: max_nnan_v8f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fmaxnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v1.h[2]
+; AARCH64-NEXT: mov h6, v0.h[2]
+; AARCH64-NEXT: mov h7, v0.h[3]
+; AARCH64-NEXT: fmaxnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[3]
+; AARCH64-NEXT: fmaxnm h5, h6, h5
+; AARCH64-NEXT: mov h6, v0.h[4]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[4]
+; AARCH64-NEXT: fmaxnm h4, h7, h4
+; AARCH64-NEXT: mov v2.h[2], v5.h[0]
+; AARCH64-NEXT: fmaxnm h3, h6, h3
+; AARCH64-NEXT: mov h5, v1.h[5]
+; AARCH64-NEXT: mov v2.h[3], v4.h[0]
+; AARCH64-NEXT: mov h4, v0.h[5]
+; AARCH64-NEXT: fmaxnm h4, h4, h5
+; AARCH64-NEXT: mov v2.h[4], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[6]
+; AARCH64-NEXT: mov h5, v0.h[6]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: mov v2.h[5], v4.h[0]
+; AARCH64-NEXT: fmaxnm h3, h5, h3
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[6], v3.h[0]
+; AARCH64-NEXT: mov v2.h[7], v0.h[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
@@ -184,41 +287,33 @@ entry:
define <9 x half> @max_nnan_v9f16(<9 x half> %a, <9 x half> %b) {
; AARCH64-LABEL: max_nnan_v9f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
-; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
-; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
-; AARCH64-NEXT: add x9, sp, #16
-; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
-; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
-; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
-; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
-; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: ldr h16, [sp, #8]
+; AARCH64-NEXT: ldr h17, [sp, #16]
+; AARCH64-NEXT: fmaxnm h1, h1, h17
+; AARCH64-NEXT: fmaxnm h0, h0, h16
+; AARCH64-NEXT: ldr h16, [sp, #24]
; AARCH64-NEXT: mov v0.h[1], v1.h[0]
-; AARCH64-NEXT: ldr h1, [sp, #8]
-; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
-; AARCH64-NEXT: add x9, sp, #24
-; AARCH64-NEXT: mov v0.h[2], v2.h[0]
-; AARCH64-NEXT: ldr h2, [sp, #72]
-; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
-; AARCH64-NEXT: add x9, sp, #32
-; AARCH64-NEXT: mov v0.h[3], v3.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
-; AARCH64-NEXT: add x9, sp, #40
+; AARCH64-NEXT: fmaxnm h1, h2, h16
+; AARCH64-NEXT: ldr h2, [sp, #32]
+; AARCH64-NEXT: mov v0.h[2], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h3, h2
+; AARCH64-NEXT: ldr h2, [sp, #40]
; AARCH64-NEXT: ldr h3, [sp]
-; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
-; AARCH64-NEXT: add x9, sp, #48
-; AARCH64-NEXT: fmaxnm v2.8h, v3.8h, v2.8h
-; AARCH64-NEXT: mov v0.h[4], v4.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
-; AARCH64-NEXT: add x9, sp, #56
+; AARCH64-NEXT: mov v0.h[3], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h4, h2
+; AARCH64-NEXT: ldr h2, [sp, #48]
+; AARCH64-NEXT: mov v0.h[4], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h5, h2
+; AARCH64-NEXT: ldr h2, [sp, #56]
+; AARCH64-NEXT: mov v0.h[5], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h6, h2
+; AARCH64-NEXT: ldr h2, [sp, #64]
+; AARCH64-NEXT: mov v0.h[6], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h7, h2
+; AARCH64-NEXT: ldr h2, [sp, #72]
+; AARCH64-NEXT: fmaxnm h2, h3, h2
+; AARCH64-NEXT: mov v0.h[7], v1.h[0]
; AARCH64-NEXT: str h2, [x8, #16]
-; AARCH64-NEXT: mov v0.h[5], v5.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
-; AARCH64-NEXT: add x9, sp, #64
-; AARCH64-NEXT: mov v0.h[6], v6.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
-; AARCH64-NEXT: mov v0.h[7], v7.h[0]
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
; AARCH64-NEXT: str q0, [x8]
; AARCH64-NEXT: ret
entry:
@@ -229,8 +324,66 @@ entry:
define <16 x half> @max_nnan_v16f16(<16 x half> %a, <16 x half> %b) {
; AARCH64-LABEL: max_nnan_v16f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fmaxnm v1.8h, v1.8h, v3.8h
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT: mov h6, v2.h[1]
+; AARCH64-NEXT: mov h7, v0.h[1]
+; AARCH64-NEXT: mov h16, v3.h[1]
+; AARCH64-NEXT: mov h17, v1.h[1]
+; AARCH64-NEXT: fmaxnm h4, h0, h2
+; AARCH64-NEXT: mov h18, v2.h[2]
+; AARCH64-NEXT: mov h19, v0.h[2]
+; AARCH64-NEXT: fmaxnm h5, h1, h3
+; AARCH64-NEXT: mov h20, v3.h[2]
+; AARCH64-NEXT: mov h21, v1.h[2]
+; AARCH64-NEXT: mov h22, v3.h[3]
+; AARCH64-NEXT: fmaxnm h6, h7, h6
+; AARCH64-NEXT: fmaxnm h7, h17, h16
+; AARCH64-NEXT: mov h16, v2.h[3]
+; AARCH64-NEXT: mov h17, v0.h[3]
+; AARCH64-NEXT: fmaxnm h18, h19, h18
+; AARCH64-NEXT: mov h19, v1.h[3]
+; AARCH64-NEXT: fmaxnm h20, h21, h20
+; AARCH64-NEXT: mov h21, v3.h[4]
+; AARCH64-NEXT: mov v4.h[1], v6.h[0]
+; AARCH64-NEXT: mov h6, v2.h[4]
+; AARCH64-NEXT: mov v5.h[1], v7.h[0]
+; AARCH64-NEXT: mov h7, v0.h[4]
+; AARCH64-NEXT: fmaxnm h16, h17, h16
+; AARCH64-NEXT: mov h17, v1.h[4]
+; AARCH64-NEXT: fmaxnm h19, h19, h22
+; AARCH64-NEXT: mov h22, v3.h[5]
+; AARCH64-NEXT: mov v4.h[2], v18.h[0]
+; AARCH64-NEXT: mov h18, v2.h[5]
+; AARCH64-NEXT: mov v5.h[2], v20.h[0]
+; AARCH64-NEXT: mov h20, v0.h[5]
+; AARCH64-NEXT: fmaxnm h6, h7, h6
+; AARCH64-NEXT: mov h7, v1.h[5]
+; AARCH64-NEXT: fmaxnm h17, h17, h21
+; AARCH64-NEXT: mov h21, v3.h[6]
+; AARCH64-NEXT: mov h3, v3.h[7]
+; AARCH64-NEXT: mov v4.h[3], v16.h[0]
+; AARCH64-NEXT: mov h16, v2.h[6]
+; AARCH64-NEXT: mov h2, v2.h[7]
+; AARCH64-NEXT: mov v5.h[3], v19.h[0]
+; AARCH64-NEXT: mov h19, v0.h[6]
+; AARCH64-NEXT: fmaxnm h18, h20, h18
+; AARCH64-NEXT: mov h20, v1.h[6]
+; AARCH64-NEXT: fmaxnm h7, h7, h22
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov v4.h[4], v6.h[0]
+; AARCH64-NEXT: mov v5.h[4], v17.h[0]
+; AARCH64-NEXT: fmaxnm h6, h19, h16
+; AARCH64-NEXT: fmaxnm h16, h20, h21
+; AARCH64-NEXT: fmaxnm h0, h0, h2
+; AARCH64-NEXT: fmaxnm h1, h1, h3
+; AARCH64-NEXT: mov v4.h[5], v18.h[0]
+; AARCH64-NEXT: mov v5.h[5], v7.h[0]
+; AARCH64-NEXT: mov v4.h[6], v6.h[0]
+; AARCH64-NEXT: mov v5.h[6], v16.h[0]
+; AARCH64-NEXT: mov v4.h[7], v0.h[0]
+; AARCH64-NEXT: mov v5.h[7], v1.h[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
@@ -251,7 +404,11 @@ entry:
define <2 x double> @min_nnan_v2f64(<2 x double> %a, <2 x double> %b) {
; AARCH64-LABEL: min_nnan_v2f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: mov d2, v1.d[1]
+; AARCH64-NEXT: mov d3, v0.d[1]
+; AARCH64-NEXT: fminnm d0, d0, d1
+; AARCH64-NEXT: fminnm d2, d3, d2
+; AARCH64-NEXT: mov v0.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
@@ -261,20 +418,9 @@ entry:
define <3 x double> @min_nnan_v3f64(<3 x double> %a, <3 x double> %b) {
; AARCH64-LABEL: min_nnan_v3f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
-; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
-; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
-; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT: mov v3.d[1], v4.d[0]
-; AARCH64-NEXT: mov v0.d[1], v1.d[0]
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v5.2d
-; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v3.2d
-; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
-; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
+; AARCH64-NEXT: fminnm d0, d0, d3
+; AARCH64-NEXT: fminnm d1, d1, d4
+; AARCH64-NEXT: fminnm d2, d2, d5
; AARCH64-NEXT: ret
entry:
%c = call nnan <3 x double> @llvm.minimumnum.v3f64(<3 x double> %a, <3 x double> %b)
@@ -284,8 +430,16 @@ entry:
define <4 x double> @min_nnan_v4f64(<4 x double> %a, <4 x double> %b) {
; AARCH64-LABEL: min_nnan_v4f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v3.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v2.2d
+; AARCH64-NEXT: mov d4, v2.d[1]
+; AARCH64-NEXT: mov d5, v0.d[1]
+; AARCH64-NEXT: mov d6, v3.d[1]
+; AARCH64-NEXT: mov d7, v1.d[1]
+; AARCH64-NEXT: fminnm d0, d0, d2
+; AARCH64-NEXT: fminnm d1, d1, d3
+; AARCH64-NEXT: fminnm d4, d5, d4
+; AARCH64-NEXT: fminnm d2, d7, d6
+; AARCH64-NEXT: mov v0.d[1], v4.d[0]
+; AARCH64-NEXT: mov v1.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
@@ -306,7 +460,14 @@ entry:
define <2 x float> @min_nnan_v2f32(<2 x float> %a, <2 x float> %b) {
; AARCH64-LABEL: min_nnan_v2f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fminnm s2, s3, s2
+; AARCH64-NEXT: mov v0.s[1], v2.s[0]
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x float> @llvm.minimumnum.v2f32(<2 x float> %a, <2 x float> %b)
@@ -316,7 +477,20 @@ entry:
define <3 x float> @min_nnan_v3f32(<3 x float> %a, <3 x float> %b) {
; AARCH64-LABEL: min_nnan_v3f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s3, v1.s[1]
+; AARCH64-NEXT: mov s4, v0.s[1]
+; AARCH64-NEXT: fminnm s2, s0, s1
+; AARCH64-NEXT: mov s5, v0.s[2]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s3, s4, s3
+; AARCH64-NEXT: mov s4, v1.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fminnm s3, s5, s4
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <3 x float> @llvm.minimumnum.v3f32(<3 x float> %a, <3 x float> %b)
@@ -326,7 +500,20 @@ entry:
define <4 x float> @min_nnan_v4f32(<4 x float> %a, <4 x float> %b) {
; AARCH64-LABEL: min_nnan_v4f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s3, v1.s[1]
+; AARCH64-NEXT: mov s4, v0.s[1]
+; AARCH64-NEXT: fminnm s2, s0, s1
+; AARCH64-NEXT: mov s5, v0.s[2]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s3, s4, s3
+; AARCH64-NEXT: mov s4, v1.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fminnm s3, s5, s4
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
@@ -336,29 +523,13 @@ entry:
define <5 x float> @min_nnan_v5f32(<5 x float> %a, <5 x float> %b) {
; AARCH64-LABEL: min_nnan_v5f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
-; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
-; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
-; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
-; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
-; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
-; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
-; AARCH64-NEXT: mov x8, sp
-; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
-; AARCH64-NEXT: mov v0.s[1], v1.s[0]
-; AARCH64-NEXT: mov v5.s[1], v6.s[0]
-; AARCH64-NEXT: mov v0.s[2], v2.s[0]
-; AARCH64-NEXT: mov v5.s[2], v7.s[0]
-; AARCH64-NEXT: ldr s2, [sp, #8]
-; AARCH64-NEXT: fminnm v4.4s, v4.4s, v2.4s
-; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
-; AARCH64-NEXT: mov v0.s[3], v3.s[0]
-; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v5.4s
-; AARCH64-NEXT: mov s1, v0.s[1]
-; AARCH64-NEXT: mov s2, v0.s[2]
-; AARCH64-NEXT: mov s3, v0.s[3]
-; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ldr s16, [sp, #8]
+; AARCH64-NEXT: ldr s17, [sp]
+; AARCH64-NEXT: fminnm s0, s0, s5
+; AARCH64-NEXT: fminnm s1, s1, s6
+; AARCH64-NEXT: fminnm s2, s2, s7
+; AARCH64-NEXT: fminnm s3, s3, s17
+; AARCH64-NEXT: fminnm s4, s4, s16
; AARCH64-NEXT: ret
entry:
%c = call nnan <5 x float> @llvm.minimumnum.v5f32(<5 x float> %a, <5 x float> %b)
@@ -368,8 +539,34 @@ entry:
define <8 x float> @min_nnan_v8f32(<8 x float> %a, <8 x float> %b) {
; AARCH64-LABEL: min_nnan_v8f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v3.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v2.4s
+; AARCH64-NEXT: mov s4, v2.s[1]
+; AARCH64-NEXT: mov s5, v0.s[1]
+; AARCH64-NEXT: mov s6, v3.s[1]
+; AARCH64-NEXT: mov s7, v1.s[1]
+; AARCH64-NEXT: mov s16, v2.s[2]
+; AARCH64-NEXT: mov s17, v0.s[2]
+; AARCH64-NEXT: mov s18, v3.s[2]
+; AARCH64-NEXT: mov s19, v1.s[2]
+; AARCH64-NEXT: fminnm s20, s5, s4
+; AARCH64-NEXT: fminnm s4, s0, s2
+; AARCH64-NEXT: fminnm s5, s1, s3
+; AARCH64-NEXT: fminnm s6, s7, s6
+; AARCH64-NEXT: fminnm s7, s17, s16
+; AARCH64-NEXT: mov s2, v2.s[3]
+; AARCH64-NEXT: fminnm s16, s19, s18
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: mov s3, v3.s[3]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov v4.s[1], v20.s[0]
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: fminnm s0, s0, s2
+; AARCH64-NEXT: fminnm s1, s1, s3
+; AARCH64-NEXT: mov v4.s[2], v7.s[0]
+; AARCH64-NEXT: mov v5.s[2], v16.s[0]
+; AARCH64-NEXT: mov v4.s[3], v0.s[0]
+; AARCH64-NEXT: mov v5.s[3], v1.s[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
@@ -390,7 +587,22 @@ entry:
define <2 x half> @min_nnan_v2f16(<2 x half> %a, <2 x half> %b) {
; AARCH64-LABEL: min_nnan_v2f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fminnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v0.h[2]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h5, h4
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call nnan <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -400,7 +612,22 @@ entry:
define <4 x half> @min_nnan_v4f16(<4 x half> %a, <4 x half> %b) {
; AARCH64-LABEL: min_nnan_v4f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fminnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v0.h[2]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h5, h4
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call nnan <4 x half> @llvm.minimumnum.v4f16(<4 x half> %a, <4 x half> %b)
@@ -410,7 +637,36 @@ entry:
define <8 x half> @min_nnan_v8f16(<8 x half> %a, <8 x half> %b) {
; AARCH64-LABEL: min_nnan_v8f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: mov h3, v1.h[1]
+; AARCH64-NEXT: mov h4, v0.h[1]
+; AARCH64-NEXT: fminnm h2, h0, h1
+; AARCH64-NEXT: mov h5, v1.h[2]
+; AARCH64-NEXT: mov h6, v0.h[2]
+; AARCH64-NEXT: mov h7, v0.h[3]
+; AARCH64-NEXT: fminnm h3, h4, h3
+; AARCH64-NEXT: mov h4, v1.h[3]
+; AARCH64-NEXT: fminnm h5, h6, h5
+; AARCH64-NEXT: mov h6, v0.h[4]
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[4]
+; AARCH64-NEXT: fminnm h4, h7, h4
+; AARCH64-NEXT: mov v2.h[2], v5.h[0]
+; AARCH64-NEXT: fminnm h3, h6, h3
+; AARCH64-NEXT: mov h5, v1.h[5]
+; AARCH64-NEXT: mov v2.h[3], v4.h[0]
+; AARCH64-NEXT: mov h4, v0.h[5]
+; AARCH64-NEXT: fminnm h4, h4, h5
+; AARCH64-NEXT: mov v2.h[4], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[6]
+; AARCH64-NEXT: mov h5, v0.h[6]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: mov v2.h[5], v4.h[0]
+; AARCH64-NEXT: fminnm h3, h5, h3
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[6], v3.h[0]
+; AARCH64-NEXT: mov v2.h[7], v0.h[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
@@ -420,41 +676,33 @@ entry:
define <9 x half> @min_nnan_v9f16(<9 x half> %a, <9 x half> %b) {
; AARCH64-LABEL: min_nnan_v9f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
-; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
-; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
-; AARCH64-NEXT: add x9, sp, #16
-; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
-; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
-; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
-; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
-; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: ldr h16, [sp, #8]
+; AARCH64-NEXT: ldr h17, [sp, #16]
+; AARCH64-NEXT: fminnm h1, h1, h17
+; AARCH64-NEXT: fminnm h0, h0, h16
+; AARCH64-NEXT: ldr h16, [sp, #24]
; AARCH64-NEXT: mov v0.h[1], v1.h[0]
-; AARCH64-NEXT: ldr h1, [sp, #8]
-; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
-; AARCH64-NEXT: add x9, sp, #24
-; AARCH64-NEXT: mov v0.h[2], v2.h[0]
-; AARCH64-NEXT: ldr h2, [sp, #72]
-; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
-; AARCH64-NEXT: add x9, sp, #32
-; AARCH64-NEXT: mov v0.h[3], v3.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
-; AARCH64-NEXT: add x9, sp, #40
+; AARCH64-NEXT: fminnm h1, h2, h16
+; AARCH64-NEXT: ldr h2, [sp, #32]
+; AARCH64-NEXT: mov v0.h[2], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h3, h2
+; AARCH64-NEXT: ldr h2, [sp, #40]
; AARCH64-NEXT: ldr h3, [sp]
-; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
-; AARCH64-NEXT: add x9, sp, #48
-; AARCH64-NEXT: fminnm v2.8h, v3.8h, v2.8h
-; AARCH64-NEXT: mov v0.h[4], v4.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
-; AARCH64-NEXT: add x9, sp, #56
+; AARCH64-NEXT: mov v0.h[3], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h4, h2
+; AARCH64-NEXT: ldr h2, [sp, #48]
+; AARCH64-NEXT: mov v0.h[4], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h5, h2
+; AARCH64-NEXT: ldr h2, [sp, #56]
+; AARCH64-NEXT: mov v0.h[5], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h6, h2
+; AARCH64-NEXT: ldr h2, [sp, #64]
+; AARCH64-NEXT: mov v0.h[6], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h7, h2
+; AARCH64-NEXT: ldr h2, [sp, #72]
+; AARCH64-NEXT: fminnm h2, h3, h2
+; AARCH64-NEXT: mov v0.h[7], v1.h[0]
; AARCH64-NEXT: str h2, [x8, #16]
-; AARCH64-NEXT: mov v0.h[5], v5.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
-; AARCH64-NEXT: add x9, sp, #64
-; AARCH64-NEXT: mov v0.h[6], v6.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
-; AARCH64-NEXT: mov v0.h[7], v7.h[0]
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
; AARCH64-NEXT: str q0, [x8]
; AARCH64-NEXT: ret
entry:
@@ -465,8 +713,66 @@ entry:
define <16 x half> @min_nnan_v16f16(<16 x half> %a, <16 x half> %b) {
; AARCH64-LABEL: min_nnan_v16f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v3.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v2.8h
+; AARCH64-NEXT: mov h6, v2.h[1]
+; AARCH64-NEXT: mov h7, v0.h[1]
+; AARCH64-NEXT: mov h16, v3.h[1]
+; AARCH64-NEXT: mov h17, v1.h[1]
+; AARCH64-NEXT: fminnm h4, h0, h2
+; AARCH64-NEXT: mov h18, v2.h[2]
+; AARCH64-NEXT: mov h19, v0.h[2]
+; AARCH64-NEXT: fminnm h5, h1, h3
+; AARCH64-NEXT: mov h20, v3.h[2]
+; AARCH64-NEXT: mov h21, v1.h[2]
+; AARCH64-NEXT: mov h22, v3.h[3]
+; AARCH64-NEXT: fminnm h6, h7, h6
+; AARCH64-NEXT: fminnm h7, h17, h16
+; AARCH64-NEXT: mov h16, v2.h[3]
+; AARCH64-NEXT: mov h17, v0.h[3]
+; AARCH64-NEXT: fminnm h18, h19, h18
+; AARCH64-NEXT: mov h19, v1.h[3]
+; AARCH64-NEXT: fminnm h20, h21, h20
+; AARCH64-NEXT: mov h21, v3.h[4]
+; AARCH64-NEXT: mov v4.h[1], v6.h[0]
+; AARCH64-NEXT: mov h6, v2.h[4]
+; AARCH64-NEXT: mov v5.h[1], v7.h[0]
+; AARCH64-NEXT: mov h7, v0.h[4]
+; AARCH64-NEXT: fminnm h16, h17, h16
+; AARCH64-NEXT: mov h17, v1.h[4]
+; AARCH64-NEXT: fminnm h19, h19, h22
+; AARCH64-NEXT: mov h22, v3.h[5]
+; AARCH64-NEXT: mov v4.h[2], v18.h[0]
+; AARCH64-NEXT: mov h18, v2.h[5]
+; AARCH64-NEXT: mov v5.h[2], v20.h[0]
+; AARCH64-NEXT: mov h20, v0.h[5]
+; AARCH64-NEXT: fminnm h6, h7, h6
+; AARCH64-NEXT: mov h7, v1.h[5]
+; AARCH64-NEXT: fminnm h17, h17, h21
+; AARCH64-NEXT: mov h21, v3.h[6]
+; AARCH64-NEXT: mov h3, v3.h[7]
+; AARCH64-NEXT: mov v4.h[3], v16.h[0]
+; AARCH64-NEXT: mov h16, v2.h[6]
+; AARCH64-NEXT: mov h2, v2.h[7]
+; AARCH64-NEXT: mov v5.h[3], v19.h[0]
+; AARCH64-NEXT: mov h19, v0.h[6]
+; AARCH64-NEXT: fminnm h18, h20, h18
+; AARCH64-NEXT: mov h20, v1.h[6]
+; AARCH64-NEXT: fminnm h7, h7, h22
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov v4.h[4], v6.h[0]
+; AARCH64-NEXT: mov v5.h[4], v17.h[0]
+; AARCH64-NEXT: fminnm h6, h19, h16
+; AARCH64-NEXT: fminnm h16, h20, h21
+; AARCH64-NEXT: fminnm h0, h0, h2
+; AARCH64-NEXT: fminnm h1, h1, h3
+; AARCH64-NEXT: mov v4.h[5], v18.h[0]
+; AARCH64-NEXT: mov v5.h[5], v7.h[0]
+; AARCH64-NEXT: mov v4.h[6], v6.h[0]
+; AARCH64-NEXT: mov v5.h[6], v16.h[0]
+; AARCH64-NEXT: mov v4.h[7], v0.h[0]
+; AARCH64-NEXT: mov v5.h[7], v1.h[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call nnan <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
@@ -489,9 +795,15 @@ entry:
define <2 x double> @max_v2f64(<2 x double> %a, <2 x double> %b) {
; AARCH64-LABEL: max_v2f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: mov d2, v1.d[1]
+; AARCH64-NEXT: mov d3, v0.d[1]
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fmaxnm d0, d0, d1
+; AARCH64-NEXT: fmaxnm d2, d3, d2
+; AARCH64-NEXT: mov v0.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
@@ -501,24 +813,15 @@ entry:
define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
; AARCH64-LABEL: max_v3f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
-; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
-; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
-; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT: mov v0.d[1], v1.d[0]
-; AARCH64-NEXT: mov v3.d[1], v4.d[0]
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT: fminnm v1.2d, v3.2d, v3.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v1.2d
-; AARCH64-NEXT: fminnm v1.2d, v5.2d, v5.2d
-; AARCH64-NEXT: fmaxnm v2.2d, v2.2d, v1.2d
-; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
-; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d4, d4, d4
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d5, d5, d5
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fmaxnm d0, d0, d3
+; AARCH64-NEXT: fmaxnm d1, d1, d4
+; AARCH64-NEXT: fmaxnm d2, d2, d5
; AARCH64-NEXT: ret
entry:
%c = call <3 x double> @llvm.maximumnum.v3f64(<3 x double> %a, <3 x double> %b)
@@ -528,12 +831,24 @@ entry:
define <4 x double> @max_v4f64(<4 x double> %a, <4 x double> %b) {
; AARCH64-LABEL: max_v4f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fminnm v3.2d, v3.2d, v3.2d
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
-; AARCH64-NEXT: fmaxnm v0.2d, v0.2d, v2.2d
-; AARCH64-NEXT: fmaxnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT: mov d4, v2.d[1]
+; AARCH64-NEXT: mov d5, v0.d[1]
+; AARCH64-NEXT: mov d6, v3.d[1]
+; AARCH64-NEXT: mov d7, v1.d[1]
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d4, d4, d4
+; AARCH64-NEXT: fminnm d5, d5, d5
+; AARCH64-NEXT: fminnm d6, d6, d6
+; AARCH64-NEXT: fminnm d7, d7, d7
+; AARCH64-NEXT: fmaxnm d0, d0, d2
+; AARCH64-NEXT: fmaxnm d1, d1, d3
+; AARCH64-NEXT: fmaxnm d4, d5, d4
+; AARCH64-NEXT: fmaxnm d2, d7, d6
+; AARCH64-NEXT: mov v0.d[1], v4.d[0]
+; AARCH64-NEXT: mov v1.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call <4 x double> @llvm.maximumnum.v4f64(<4 x double> %a, <4 x double> %b)
@@ -556,9 +871,18 @@ entry:
define <2 x float> @max_v2f32(<2 x float> %a, <2 x float> %b) {
; AARCH64-LABEL: max_v2f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.2s, v1.2s, v1.2s
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
-; AARCH64-NEXT: fmaxnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: fmaxnm s2, s3, s2
+; AARCH64-NEXT: mov v0.s[1], v2.s[0]
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
; AARCH64-NEXT: ret
entry:
%c = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %a, <2 x float> %b)
@@ -568,9 +892,28 @@ entry:
define <3 x float> @max_v3f32(<3 x float> %a, <3 x float> %b) {
; AARCH64-LABEL: max_v3f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s4, s1, s1
+; AARCH64-NEXT: fminnm s5, s0, s0
+; AARCH64-NEXT: mov s7, v1.s[2]
+; AARCH64-NEXT: mov s16, v0.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s6, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fmaxnm s2, s5, s4
+; AARCH64-NEXT: fminnm s4, s7, s7
+; AARCH64-NEXT: fminnm s5, s16, s16
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fmaxnm s3, s3, s6
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fmaxnm s3, s5, s4
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <3 x float> @llvm.maximumnum.v3f32(<3 x float> %a, <3 x float> %b)
@@ -580,9 +923,28 @@ entry:
define <4 x float> @max_v4f32(<4 x float> %a, <4 x float> %b) {
; AARCH64-LABEL: max_v4f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s4, s1, s1
+; AARCH64-NEXT: fminnm s5, s0, s0
+; AARCH64-NEXT: mov s7, v1.s[2]
+; AARCH64-NEXT: mov s16, v0.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s6, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fmaxnm s2, s5, s4
+; AARCH64-NEXT: fminnm s4, s7, s7
+; AARCH64-NEXT: fminnm s5, s16, s16
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fmaxnm s3, s3, s6
+; AARCH64-NEXT: fmaxnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fmaxnm s3, s5, s4
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
@@ -592,33 +954,23 @@ entry:
define <5 x float> @max_v5f32(<5 x float> %a, <5 x float> %b) {
; AARCH64-LABEL: max_v5f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
-; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
-; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
-; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
-; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
-; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
-; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
-; AARCH64-NEXT: mov x8, sp
-; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
-; AARCH64-NEXT: mov v0.s[1], v1.s[0]
-; AARCH64-NEXT: mov v5.s[1], v6.s[0]
-; AARCH64-NEXT: mov v0.s[2], v2.s[0]
-; AARCH64-NEXT: mov v5.s[2], v7.s[0]
-; AARCH64-NEXT: ldr s2, [sp, #8]
-; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
-; AARCH64-NEXT: mov v0.s[3], v3.s[0]
-; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
-; AARCH64-NEXT: fminnm v3.4s, v4.4s, v4.4s
-; AARCH64-NEXT: fminnm v1.4s, v5.4s, v5.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fmaxnm v4.4s, v3.4s, v2.4s
-; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v1.4s
-; AARCH64-NEXT: mov s1, v0.s[1]
-; AARCH64-NEXT: mov s2, v0.s[2]
-; AARCH64-NEXT: mov s3, v0.s[3]
-; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ldr s16, [sp, #8]
+; AARCH64-NEXT: ldr s17, [sp]
+; AARCH64-NEXT: fminnm s5, s5, s5
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s6, s6, s6
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s7, s7, s7
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s17, s17, s17
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s16, s16, s16
+; AARCH64-NEXT: fminnm s4, s4, s4
+; AARCH64-NEXT: fmaxnm s0, s0, s5
+; AARCH64-NEXT: fmaxnm s1, s1, s6
+; AARCH64-NEXT: fmaxnm s2, s2, s7
+; AARCH64-NEXT: fmaxnm s3, s3, s17
+; AARCH64-NEXT: fmaxnm s4, s4, s16
; AARCH64-NEXT: ret
entry:
%c = call <5 x float> @llvm.maximumnum.v5f32(<5 x float> %a, <5 x float> %b)
@@ -628,12 +980,50 @@ entry:
define <8 x float> @max_v8f32(<8 x float> %a, <8 x float> %b) {
; AARCH64-LABEL: max_v8f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fminnm v3.4s, v3.4s, v3.4s
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fmaxnm v0.4s, v0.4s, v2.4s
-; AARCH64-NEXT: fmaxnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT: mov s4, v2.s[1]
+; AARCH64-NEXT: mov s5, v0.s[1]
+; AARCH64-NEXT: mov s6, v3.s[1]
+; AARCH64-NEXT: mov s7, v1.s[1]
+; AARCH64-NEXT: fminnm s16, s2, s2
+; AARCH64-NEXT: fminnm s17, s0, s0
+; AARCH64-NEXT: mov s18, v2.s[2]
+; AARCH64-NEXT: fminnm s19, s3, s3
+; AARCH64-NEXT: fminnm s20, s1, s1
+; AARCH64-NEXT: mov s23, v0.s[2]
+; AARCH64-NEXT: mov s24, v3.s[2]
+; AARCH64-NEXT: mov s25, v1.s[2]
+; AARCH64-NEXT: fminnm s21, s4, s4
+; AARCH64-NEXT: fminnm s22, s5, s5
+; AARCH64-NEXT: fminnm s6, s6, s6
+; AARCH64-NEXT: fminnm s7, s7, s7
+; AARCH64-NEXT: fmaxnm s4, s17, s16
+; AARCH64-NEXT: mov s2, v2.s[3]
+; AARCH64-NEXT: fmaxnm s5, s20, s19
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: mov s3, v3.s[3]
+; AARCH64-NEXT: fminnm s17, s23, s23
+; AARCH64-NEXT: fminnm s19, s25, s25
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: fmaxnm s16, s22, s21
+; AARCH64-NEXT: fmaxnm s6, s7, s6
+; AARCH64-NEXT: fminnm s7, s18, s18
+; AARCH64-NEXT: fminnm s18, s24, s24
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: mov v4.s[1], v16.s[0]
+; AARCH64-NEXT: fmaxnm s7, s17, s7
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: fmaxnm s6, s19, s18
+; AARCH64-NEXT: fmaxnm s0, s0, s2
+; AARCH64-NEXT: fmaxnm s1, s1, s3
+; AARCH64-NEXT: mov v4.s[2], v7.s[0]
+; AARCH64-NEXT: mov v5.s[2], v6.s[0]
+; AARCH64-NEXT: mov v4.s[3], v0.s[0]
+; AARCH64-NEXT: mov v5.s[3], v1.s[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call <8 x float> @llvm.maximumnum.v8f32(<8 x float> %a, <8 x float> %b)
@@ -656,9 +1046,30 @@ entry:
define <2 x half> @max_v2f16(<2 x half> %a, <2 x half> %b) {
; AARCH64-LABEL: max_v2f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fmaxnm h2, h5, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h5, h16, h16
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fmaxnm h3, h3, h6
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fmaxnm h3, h5, h4
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -668,9 +1079,30 @@ entry:
define <4 x half> @max_v4f16(<4 x half> %a, <4 x half> %b) {
; AARCH64-LABEL: max_v4f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: fmaxnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fmaxnm h2, h5, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h5, h16, h16
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fmaxnm h3, h3, h6
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fmaxnm h3, h5, h4
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %a, <4 x half> %b)
@@ -680,9 +1112,52 @@ entry:
define <8 x half> @max_v8f16(<8 x half> %a, <8 x half> %b) {
; AARCH64-LABEL: max_v8f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fmaxnm h2, h5, h4
+; AARCH64-NEXT: mov h4, v1.h[3]
+; AARCH64-NEXT: mov h5, v0.h[3]
+; AARCH64-NEXT: fmaxnm h3, h3, h6
+; AARCH64-NEXT: fminnm h6, h7, h7
+; AARCH64-NEXT: fminnm h7, h16, h16
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fmaxnm h6, h7, h6
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[4]
+; AARCH64-NEXT: mov h7, v0.h[4]
+; AARCH64-NEXT: fmaxnm h4, h5, h4
+; AARCH64-NEXT: mov h5, v1.h[5]
+; AARCH64-NEXT: mov v2.h[2], v6.h[0]
+; AARCH64-NEXT: mov h6, v0.h[5]
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h6, h6, h6
+; AARCH64-NEXT: mov v2.h[3], v4.h[0]
+; AARCH64-NEXT: mov h4, v1.h[6]
+; AARCH64-NEXT: fmaxnm h3, h7, h3
+; AARCH64-NEXT: mov h7, v0.h[6]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: fmaxnm h5, h6, h5
+; AARCH64-NEXT: mov v2.h[4], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h4, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: mov v2.h[5], v5.h[0]
+; AARCH64-NEXT: fmaxnm h3, h4, h3
+; AARCH64-NEXT: fmaxnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[6], v3.h[0]
+; AARCH64-NEXT: mov v2.h[7], v0.h[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> %a, <8 x half> %b)
@@ -692,45 +1167,51 @@ entry:
define <9 x half> @max_v9f16(<9 x half> %a, <9 x half> %b) {
; AARCH64-LABEL: max_v9f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
-; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
-; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
-; AARCH64-NEXT: add x9, sp, #16
-; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
-; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
-; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
-; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
-; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: ldr h16, [sp, #16]
+; AARCH64-NEXT: ldr h17, [sp, #8]
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: ldr h18, [sp, #24]
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fminnm h16, h16, h16
+; AARCH64-NEXT: fminnm h17, h17, h17
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h18, h18, h18
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fmaxnm h1, h1, h16
+; AARCH64-NEXT: fmaxnm h0, h0, h17
+; AARCH64-NEXT: ldr h16, [sp, #32]
+; AARCH64-NEXT: fmaxnm h2, h2, h18
+; AARCH64-NEXT: fminnm h16, h16, h16
; AARCH64-NEXT: mov v0.h[1], v1.h[0]
-; AARCH64-NEXT: ldr h1, [sp, #8]
-; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
-; AARCH64-NEXT: add x9, sp, #24
+; AARCH64-NEXT: ldr h1, [sp, #40]
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fmaxnm h3, h3, h16
; AARCH64-NEXT: mov v0.h[2], v2.h[0]
-; AARCH64-NEXT: ldr h2, [sp]
-; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
-; AARCH64-NEXT: add x9, sp, #32
-; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: ldr h2, [sp, #48]
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fmaxnm h1, h4, h1
+; AARCH64-NEXT: ldr h4, [sp, #64]
; AARCH64-NEXT: mov v0.h[3], v3.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
-; AARCH64-NEXT: add x9, sp, #40
-; AARCH64-NEXT: ldr h3, [sp, #72]
-; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
-; AARCH64-NEXT: add x9, sp, #48
-; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
-; AARCH64-NEXT: mov v0.h[4], v4.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
-; AARCH64-NEXT: add x9, sp, #56
-; AARCH64-NEXT: fmaxnm v2.8h, v2.8h, v3.8h
-; AARCH64-NEXT: mov v0.h[5], v5.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
-; AARCH64-NEXT: add x9, sp, #64
+; AARCH64-NEXT: ldr h3, [sp, #56]
+; AARCH64-NEXT: fmaxnm h2, h5, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: ldr h5, [sp, #72]
+; AARCH64-NEXT: mov v0.h[4], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h6, h6
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: mov v0.h[5], v2.h[0]
+; AARCH64-NEXT: fmaxnm h1, h1, h3
+; AARCH64-NEXT: fminnm h2, h4, h4
+; AARCH64-NEXT: fminnm h3, h7, h7
+; AARCH64-NEXT: ldr h4, [sp]
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: mov v0.h[6], v1.h[0]
+; AARCH64-NEXT: fmaxnm h1, h3, h2
+; AARCH64-NEXT: fmaxnm h2, h4, h5
+; AARCH64-NEXT: mov v0.h[7], v1.h[0]
; AARCH64-NEXT: str h2, [x8, #16]
-; AARCH64-NEXT: mov v0.h[6], v6.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: mov v0.h[7], v7.h[0]
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v1.8h
; AARCH64-NEXT: str q0, [x8]
; AARCH64-NEXT: ret
entry:
@@ -741,12 +1222,98 @@ entry:
define <16 x half> @max_v16f16(<16 x half> %a, <16 x half> %b) {
; AARCH64-LABEL: max_v16f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: fmaxnm v0.8h, v0.8h, v2.8h
-; AARCH64-NEXT: fmaxnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT: mov h4, v2.h[1]
+; AARCH64-NEXT: mov h5, v0.h[1]
+; AARCH64-NEXT: mov h6, v3.h[1]
+; AARCH64-NEXT: mov h7, v1.h[1]
+; AARCH64-NEXT: fminnm h16, h2, h2
+; AARCH64-NEXT: fminnm h17, h0, h0
+; AARCH64-NEXT: mov h18, v2.h[2]
+; AARCH64-NEXT: mov h19, v0.h[2]
+; AARCH64-NEXT: fminnm h20, h3, h3
+; AARCH64-NEXT: fminnm h22, h1, h1
+; AARCH64-NEXT: mov h23, v3.h[2]
+; AARCH64-NEXT: mov h24, v1.h[2]
+; AARCH64-NEXT: fminnm h21, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h6, h6, h6
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fmaxnm h4, h17, h16
+; AARCH64-NEXT: fminnm h16, h18, h18
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h19, v2.h[3]
+; AARCH64-NEXT: fmaxnm h18, h5, h21
+; AARCH64-NEXT: mov h21, v0.h[3]
+; AARCH64-NEXT: fmaxnm h5, h22, h20
+; AARCH64-NEXT: fmaxnm h6, h7, h6
+; AARCH64-NEXT: fminnm h7, h23, h23
+; AARCH64-NEXT: fminnm h20, h24, h24
+; AARCH64-NEXT: mov h22, v3.h[3]
+; AARCH64-NEXT: mov h23, v1.h[3]
+; AARCH64-NEXT: fmaxnm h16, h17, h16
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h19, v2.h[4]
+; AARCH64-NEXT: mov v4.h[1], v18.h[0]
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h21, v0.h[4]
+; AARCH64-NEXT: mov v5.h[1], v6.h[0]
+; AARCH64-NEXT: fmaxnm h6, h20, h7
+; AARCH64-NEXT: fminnm h7, h22, h22
+; AARCH64-NEXT: fminnm h20, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[4]
+; AARCH64-NEXT: mov h23, v1.h[4]
+; AARCH64-NEXT: mov v4.h[2], v16.h[0]
+; AARCH64-NEXT: fmaxnm h16, h18, h17
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h19, v2.h[5]
+; AARCH64-NEXT: mov h21, v0.h[5]
+; AARCH64-NEXT: mov v5.h[2], v6.h[0]
+; AARCH64-NEXT: fmaxnm h6, h20, h7
+; AARCH64-NEXT: fminnm h7, h22, h22
+; AARCH64-NEXT: fminnm h20, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[5]
+; AARCH64-NEXT: mov h23, v1.h[5]
+; AARCH64-NEXT: mov v4.h[3], v16.h[0]
+; AARCH64-NEXT: fmaxnm h16, h18, h17
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h19, v2.h[6]
+; AARCH64-NEXT: mov v5.h[3], v6.h[0]
+; AARCH64-NEXT: mov h2, v2.h[7]
+; AARCH64-NEXT: fmaxnm h6, h20, h7
+; AARCH64-NEXT: mov h7, v0.h[6]
+; AARCH64-NEXT: fminnm h20, h22, h22
+; AARCH64-NEXT: fminnm h21, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[6]
+; AARCH64-NEXT: mov h23, v1.h[6]
+; AARCH64-NEXT: mov v4.h[4], v16.h[0]
+; AARCH64-NEXT: fmaxnm h16, h18, h17
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h3, v3.h[7]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov v5.h[4], v6.h[0]
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fmaxnm h6, h21, h20
+; AARCH64-NEXT: fminnm h18, h22, h22
+; AARCH64-NEXT: fminnm h19, h23, h23
+; AARCH64-NEXT: mov v4.h[5], v16.h[0]
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fmaxnm h7, h7, h17
+; AARCH64-NEXT: mov v5.h[5], v6.h[0]
+; AARCH64-NEXT: fmaxnm h6, h19, h18
+; AARCH64-NEXT: fmaxnm h0, h0, h2
+; AARCH64-NEXT: fmaxnm h1, h1, h3
+; AARCH64-NEXT: mov v4.h[6], v7.h[0]
+; AARCH64-NEXT: mov v5.h[6], v6.h[0]
+; AARCH64-NEXT: mov v4.h[7], v0.h[0]
+; AARCH64-NEXT: mov v5.h[7], v1.h[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call <16 x half> @llvm.maximumnum.v16f16(<16 x half> %a, <16 x half> %b)
@@ -769,9 +1336,15 @@ entry:
define <2 x double> @min_v2f64(<2 x double> %a, <2 x double> %b) {
; AARCH64-LABEL: min_v2f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT: mov d2, v1.d[1]
+; AARCH64-NEXT: mov d3, v0.d[1]
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fminnm d0, d0, d1
+; AARCH64-NEXT: fminnm d2, d3, d2
+; AARCH64-NEXT: mov v0.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
@@ -781,24 +1354,15 @@ entry:
define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
; AARCH64-LABEL: min_v3f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $d3 killed $d3 def $q3
-; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
-; AARCH64-NEXT: // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 def $q2
-; AARCH64-NEXT: // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT: mov v0.d[1], v1.d[0]
-; AARCH64-NEXT: mov v3.d[1], v4.d[0]
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT: fminnm v1.2d, v3.2d, v3.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v1.2d
-; AARCH64-NEXT: fminnm v1.2d, v5.2d, v5.2d
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v1.2d
-; AARCH64-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
-; AARCH64-NEXT: // kill: def $d1 killed $d1 killed $q1
-; AARCH64-NEXT: // kill: def $d2 killed $d2 killed $q2
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d4, d4, d4
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d5, d5, d5
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fminnm d0, d0, d3
+; AARCH64-NEXT: fminnm d1, d1, d4
+; AARCH64-NEXT: fminnm d2, d2, d5
; AARCH64-NEXT: ret
entry:
%c = call <3 x double> @llvm.minimumnum.v3f64(<3 x double> %a, <3 x double> %b)
@@ -808,12 +1372,24 @@ entry:
define <4 x double> @min_v4f64(<4 x double> %a, <4 x double> %b) {
; AARCH64-LABEL: min_v4f64:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v0.2d
-; AARCH64-NEXT: fminnm v3.2d, v3.2d, v3.2d
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v1.2d
-; AARCH64-NEXT: fminnm v0.2d, v0.2d, v2.2d
-; AARCH64-NEXT: fminnm v1.2d, v1.2d, v3.2d
+; AARCH64-NEXT: mov d4, v2.d[1]
+; AARCH64-NEXT: mov d5, v0.d[1]
+; AARCH64-NEXT: mov d6, v3.d[1]
+; AARCH64-NEXT: mov d7, v1.d[1]
+; AARCH64-NEXT: fminnm d2, d2, d2
+; AARCH64-NEXT: fminnm d0, d0, d0
+; AARCH64-NEXT: fminnm d3, d3, d3
+; AARCH64-NEXT: fminnm d1, d1, d1
+; AARCH64-NEXT: fminnm d4, d4, d4
+; AARCH64-NEXT: fminnm d5, d5, d5
+; AARCH64-NEXT: fminnm d6, d6, d6
+; AARCH64-NEXT: fminnm d7, d7, d7
+; AARCH64-NEXT: fminnm d0, d0, d2
+; AARCH64-NEXT: fminnm d1, d1, d3
+; AARCH64-NEXT: fminnm d4, d5, d4
+; AARCH64-NEXT: fminnm d2, d7, d6
+; AARCH64-NEXT: mov v0.d[1], v4.d[0]
+; AARCH64-NEXT: mov v1.d[1], v2.d[0]
; AARCH64-NEXT: ret
entry:
%c = call <4 x double> @llvm.minimumnum.v4f64(<4 x double> %a, <4 x double> %b)
@@ -836,9 +1412,18 @@ entry:
define <2 x float> @min_v2f32(<2 x float> %a, <2 x float> %b) {
; AARCH64-LABEL: min_v2f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.2s, v1.2s, v1.2s
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v0.2s
-; AARCH64-NEXT: fminnm v0.2s, v0.2s, v1.2s
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: fminnm s2, s3, s2
+; AARCH64-NEXT: mov v0.s[1], v2.s[0]
+; AARCH64-NEXT: // kill: def $d0 killed $d0 killed $q0
; AARCH64-NEXT: ret
entry:
%c = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %a, <2 x float> %b)
@@ -848,9 +1433,28 @@ entry:
define <3 x float> @min_v3f32(<3 x float> %a, <3 x float> %b) {
; AARCH64-LABEL: min_v3f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s4, s1, s1
+; AARCH64-NEXT: fminnm s5, s0, s0
+; AARCH64-NEXT: mov s7, v1.s[2]
+; AARCH64-NEXT: mov s16, v0.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s6, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s2, s5, s4
+; AARCH64-NEXT: fminnm s4, s7, s7
+; AARCH64-NEXT: fminnm s5, s16, s16
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s3, s3, s6
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fminnm s3, s5, s4
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <3 x float> @llvm.minimumnum.v3f32(<3 x float> %a, <3 x float> %b)
@@ -860,9 +1464,28 @@ entry:
define <4 x float> @min_v4f32(<4 x float> %a, <4 x float> %b) {
; AARCH64-LABEL: min_v4f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
+; AARCH64-NEXT: mov s2, v1.s[1]
+; AARCH64-NEXT: mov s3, v0.s[1]
+; AARCH64-NEXT: fminnm s4, s1, s1
+; AARCH64-NEXT: fminnm s5, s0, s0
+; AARCH64-NEXT: mov s7, v1.s[2]
+; AARCH64-NEXT: mov s16, v0.s[2]
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: fminnm s6, s2, s2
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s2, s5, s4
+; AARCH64-NEXT: fminnm s4, s7, s7
+; AARCH64-NEXT: fminnm s5, s16, s16
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s3, s3, s6
+; AARCH64-NEXT: fminnm s0, s0, s1
+; AARCH64-NEXT: mov v2.s[1], v3.s[0]
+; AARCH64-NEXT: fminnm s3, s5, s4
+; AARCH64-NEXT: mov v2.s[2], v3.s[0]
+; AARCH64-NEXT: mov v2.s[3], v0.s[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
@@ -872,33 +1495,23 @@ entry:
define <5 x float> @min_v5f32(<5 x float> %a, <5 x float> %b) {
; AARCH64-LABEL: min_v5f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $s0 killed $s0 def $q0
-; AARCH64-NEXT: // kill: def $s5 killed $s5 def $q5
-; AARCH64-NEXT: // kill: def $s1 killed $s1 def $q1
-; AARCH64-NEXT: // kill: def $s6 killed $s6 def $q6
-; AARCH64-NEXT: // kill: def $s2 killed $s2 def $q2
-; AARCH64-NEXT: // kill: def $s7 killed $s7 def $q7
-; AARCH64-NEXT: // kill: def $s3 killed $s3 def $q3
-; AARCH64-NEXT: mov x8, sp
-; AARCH64-NEXT: // kill: def $s4 killed $s4 def $q4
-; AARCH64-NEXT: mov v0.s[1], v1.s[0]
-; AARCH64-NEXT: mov v5.s[1], v6.s[0]
-; AARCH64-NEXT: mov v0.s[2], v2.s[0]
-; AARCH64-NEXT: mov v5.s[2], v7.s[0]
-; AARCH64-NEXT: ldr s2, [sp, #8]
-; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
-; AARCH64-NEXT: mov v0.s[3], v3.s[0]
-; AARCH64-NEXT: ld1 { v5.s }[3], [x8]
-; AARCH64-NEXT: fminnm v3.4s, v4.4s, v4.4s
-; AARCH64-NEXT: fminnm v1.4s, v5.4s, v5.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fminnm v4.4s, v3.4s, v2.4s
-; AARCH64-NEXT: // kill: def $s4 killed $s4 killed $q4
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v1.4s
-; AARCH64-NEXT: mov s1, v0.s[1]
-; AARCH64-NEXT: mov s2, v0.s[2]
-; AARCH64-NEXT: mov s3, v0.s[3]
-; AARCH64-NEXT: // kill: def $s0 killed $s0 killed $q0
+; AARCH64-NEXT: ldr s16, [sp, #8]
+; AARCH64-NEXT: ldr s17, [sp]
+; AARCH64-NEXT: fminnm s5, s5, s5
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s6, s6, s6
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: fminnm s7, s7, s7
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s17, s17, s17
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s16, s16, s16
+; AARCH64-NEXT: fminnm s4, s4, s4
+; AARCH64-NEXT: fminnm s0, s0, s5
+; AARCH64-NEXT: fminnm s1, s1, s6
+; AARCH64-NEXT: fminnm s2, s2, s7
+; AARCH64-NEXT: fminnm s3, s3, s17
+; AARCH64-NEXT: fminnm s4, s4, s16
; AARCH64-NEXT: ret
entry:
%c = call <5 x float> @llvm.minimumnum.v5f32(<5 x float> %a, <5 x float> %b)
@@ -908,12 +1521,50 @@ entry:
define <8 x float> @min_v8f32(<8 x float> %a, <8 x float> %b) {
; AARCH64-LABEL: min_v8f32:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.4s, v2.4s, v2.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v0.4s
-; AARCH64-NEXT: fminnm v3.4s, v3.4s, v3.4s
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v1.4s
-; AARCH64-NEXT: fminnm v0.4s, v0.4s, v2.4s
-; AARCH64-NEXT: fminnm v1.4s, v1.4s, v3.4s
+; AARCH64-NEXT: mov s4, v2.s[1]
+; AARCH64-NEXT: mov s5, v0.s[1]
+; AARCH64-NEXT: mov s6, v3.s[1]
+; AARCH64-NEXT: mov s7, v1.s[1]
+; AARCH64-NEXT: fminnm s16, s2, s2
+; AARCH64-NEXT: fminnm s17, s0, s0
+; AARCH64-NEXT: mov s18, v2.s[2]
+; AARCH64-NEXT: fminnm s19, s3, s3
+; AARCH64-NEXT: fminnm s20, s1, s1
+; AARCH64-NEXT: mov s23, v0.s[2]
+; AARCH64-NEXT: mov s24, v3.s[2]
+; AARCH64-NEXT: mov s25, v1.s[2]
+; AARCH64-NEXT: fminnm s21, s4, s4
+; AARCH64-NEXT: fminnm s22, s5, s5
+; AARCH64-NEXT: fminnm s6, s6, s6
+; AARCH64-NEXT: fminnm s7, s7, s7
+; AARCH64-NEXT: fminnm s4, s17, s16
+; AARCH64-NEXT: mov s2, v2.s[3]
+; AARCH64-NEXT: fminnm s5, s20, s19
+; AARCH64-NEXT: mov s0, v0.s[3]
+; AARCH64-NEXT: mov s3, v3.s[3]
+; AARCH64-NEXT: fminnm s17, s23, s23
+; AARCH64-NEXT: fminnm s19, s25, s25
+; AARCH64-NEXT: mov s1, v1.s[3]
+; AARCH64-NEXT: fminnm s16, s22, s21
+; AARCH64-NEXT: fminnm s6, s7, s6
+; AARCH64-NEXT: fminnm s7, s18, s18
+; AARCH64-NEXT: fminnm s18, s24, s24
+; AARCH64-NEXT: fminnm s2, s2, s2
+; AARCH64-NEXT: fminnm s0, s0, s0
+; AARCH64-NEXT: fminnm s3, s3, s3
+; AARCH64-NEXT: fminnm s1, s1, s1
+; AARCH64-NEXT: mov v4.s[1], v16.s[0]
+; AARCH64-NEXT: fminnm s7, s17, s7
+; AARCH64-NEXT: mov v5.s[1], v6.s[0]
+; AARCH64-NEXT: fminnm s6, s19, s18
+; AARCH64-NEXT: fminnm s0, s0, s2
+; AARCH64-NEXT: fminnm s1, s1, s3
+; AARCH64-NEXT: mov v4.s[2], v7.s[0]
+; AARCH64-NEXT: mov v5.s[2], v6.s[0]
+; AARCH64-NEXT: mov v4.s[3], v0.s[0]
+; AARCH64-NEXT: mov v5.s[3], v1.s[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call <8 x float> @llvm.minimumnum.v8f32(<8 x float> %a, <8 x float> %b)
@@ -936,9 +1587,30 @@ entry:
define <2 x half> @min_v2f16(<2 x half> %a, <2 x half> %b) {
; AARCH64-LABEL: min_v2f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h2, h5, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h5, h16, h16
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fminnm h3, h3, h6
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h5, h4
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b)
@@ -948,9 +1620,30 @@ entry:
define <4 x half> @min_v4f16(<4 x half> %a, <4 x half> %b) {
; AARCH64-LABEL: min_v4f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.4h, v1.4h, v1.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v0.4h
-; AARCH64-NEXT: fminnm v0.4h, v0.4h, v1.4h
+; AARCH64-NEXT: // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT: // kill: def $d0 killed $d0 def $q0
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: mov h1, v1.h[3]
+; AARCH64-NEXT: mov h0, v0.h[3]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h2, h5, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h5, h16, h16
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fminnm h3, h3, h6
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h5, h4
+; AARCH64-NEXT: mov v2.h[2], v3.h[0]
+; AARCH64-NEXT: mov v2.h[3], v0.h[0]
+; AARCH64-NEXT: fmov d0, d2
; AARCH64-NEXT: ret
entry:
%c = call <4 x half> @llvm.minimumnum.v4f16(<4 x half> %a, <4 x half> %b)
@@ -960,9 +1653,52 @@ entry:
define <8 x half> @min_v8f16(<8 x half> %a, <8 x half> %b) {
; AARCH64-LABEL: min_v8f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
+; AARCH64-NEXT: mov h2, v1.h[1]
+; AARCH64-NEXT: mov h3, v0.h[1]
+; AARCH64-NEXT: fminnm h4, h1, h1
+; AARCH64-NEXT: fminnm h5, h0, h0
+; AARCH64-NEXT: mov h7, v1.h[2]
+; AARCH64-NEXT: mov h16, v0.h[2]
+; AARCH64-NEXT: fminnm h6, h2, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h2, h5, h4
+; AARCH64-NEXT: mov h4, v1.h[3]
+; AARCH64-NEXT: mov h5, v0.h[3]
+; AARCH64-NEXT: fminnm h3, h3, h6
+; AARCH64-NEXT: fminnm h6, h7, h7
+; AARCH64-NEXT: fminnm h7, h16, h16
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h6, h7, h6
+; AARCH64-NEXT: mov v2.h[1], v3.h[0]
+; AARCH64-NEXT: mov h3, v1.h[4]
+; AARCH64-NEXT: mov h7, v0.h[4]
+; AARCH64-NEXT: fminnm h4, h5, h4
+; AARCH64-NEXT: mov h5, v1.h[5]
+; AARCH64-NEXT: mov v2.h[2], v6.h[0]
+; AARCH64-NEXT: mov h6, v0.h[5]
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h6, h6, h6
+; AARCH64-NEXT: mov v2.h[3], v4.h[0]
+; AARCH64-NEXT: mov h4, v1.h[6]
+; AARCH64-NEXT: fminnm h3, h7, h3
+; AARCH64-NEXT: mov h7, v0.h[6]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: fminnm h5, h6, h5
+; AARCH64-NEXT: mov v2.h[4], v3.h[0]
+; AARCH64-NEXT: fminnm h3, h4, h4
+; AARCH64-NEXT: fminnm h4, h7, h7
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: mov v2.h[5], v5.h[0]
+; AARCH64-NEXT: fminnm h3, h4, h3
+; AARCH64-NEXT: fminnm h0, h0, h1
+; AARCH64-NEXT: mov v2.h[6], v3.h[0]
+; AARCH64-NEXT: mov v2.h[7], v0.h[0]
+; AARCH64-NEXT: mov v0.16b, v2.16b
; AARCH64-NEXT: ret
entry:
%c = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> %a, <8 x half> %b)
@@ -972,45 +1708,51 @@ entry:
define <9 x half> @min_v9f16(<9 x half> %a, <9 x half> %b) {
; AARCH64-LABEL: min_v9f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: // kill: def $h0 killed $h0 def $q0
-; AARCH64-NEXT: // kill: def $h1 killed $h1 def $q1
-; AARCH64-NEXT: // kill: def $h2 killed $h2 def $q2
-; AARCH64-NEXT: add x9, sp, #16
-; AARCH64-NEXT: // kill: def $h3 killed $h3 def $q3
-; AARCH64-NEXT: // kill: def $h4 killed $h4 def $q4
-; AARCH64-NEXT: // kill: def $h5 killed $h5 def $q5
-; AARCH64-NEXT: // kill: def $h6 killed $h6 def $q6
-; AARCH64-NEXT: // kill: def $h7 killed $h7 def $q7
+; AARCH64-NEXT: ldr h16, [sp, #16]
+; AARCH64-NEXT: ldr h17, [sp, #8]
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: ldr h18, [sp, #24]
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fminnm h16, h16, h16
+; AARCH64-NEXT: fminnm h17, h17, h17
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h18, h18, h18
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h1, h1, h16
+; AARCH64-NEXT: fminnm h0, h0, h17
+; AARCH64-NEXT: ldr h16, [sp, #32]
+; AARCH64-NEXT: fminnm h2, h2, h18
+; AARCH64-NEXT: fminnm h16, h16, h16
; AARCH64-NEXT: mov v0.h[1], v1.h[0]
-; AARCH64-NEXT: ldr h1, [sp, #8]
-; AARCH64-NEXT: ld1 { v1.h }[1], [x9]
-; AARCH64-NEXT: add x9, sp, #24
+; AARCH64-NEXT: ldr h1, [sp, #40]
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h3, h3, h16
; AARCH64-NEXT: mov v0.h[2], v2.h[0]
-; AARCH64-NEXT: ldr h2, [sp]
-; AARCH64-NEXT: ld1 { v1.h }[2], [x9]
-; AARCH64-NEXT: add x9, sp, #32
-; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
+; AARCH64-NEXT: ldr h2, [sp, #48]
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fminnm h1, h4, h1
+; AARCH64-NEXT: ldr h4, [sp, #64]
; AARCH64-NEXT: mov v0.h[3], v3.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[3], [x9]
-; AARCH64-NEXT: add x9, sp, #40
-; AARCH64-NEXT: ldr h3, [sp, #72]
-; AARCH64-NEXT: ld1 { v1.h }[4], [x9]
-; AARCH64-NEXT: add x9, sp, #48
-; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
-; AARCH64-NEXT: mov v0.h[4], v4.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[5], [x9]
-; AARCH64-NEXT: add x9, sp, #56
-; AARCH64-NEXT: fminnm v2.8h, v2.8h, v3.8h
-; AARCH64-NEXT: mov v0.h[5], v5.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[6], [x9]
-; AARCH64-NEXT: add x9, sp, #64
+; AARCH64-NEXT: ldr h3, [sp, #56]
+; AARCH64-NEXT: fminnm h2, h5, h2
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: ldr h5, [sp, #72]
+; AARCH64-NEXT: mov v0.h[4], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h6, h6
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: mov v0.h[5], v2.h[0]
+; AARCH64-NEXT: fminnm h1, h1, h3
+; AARCH64-NEXT: fminnm h2, h4, h4
+; AARCH64-NEXT: fminnm h3, h7, h7
+; AARCH64-NEXT: ldr h4, [sp]
+; AARCH64-NEXT: fminnm h4, h4, h4
+; AARCH64-NEXT: mov v0.h[6], v1.h[0]
+; AARCH64-NEXT: fminnm h1, h3, h2
+; AARCH64-NEXT: fminnm h2, h4, h5
+; AARCH64-NEXT: mov v0.h[7], v1.h[0]
; AARCH64-NEXT: str h2, [x8, #16]
-; AARCH64-NEXT: mov v0.h[6], v6.h[0]
-; AARCH64-NEXT: ld1 { v1.h }[7], [x9]
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: mov v0.h[7], v7.h[0]
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v1.8h
; AARCH64-NEXT: str q0, [x8]
; AARCH64-NEXT: ret
entry:
@@ -1021,12 +1763,98 @@ entry:
define <16 x half> @min_v16f16(<16 x half> %a, <16 x half> %b) {
; AARCH64-LABEL: min_v16f16:
; AARCH64: // %bb.0: // %entry
-; AARCH64-NEXT: fminnm v2.8h, v2.8h, v2.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v0.8h
-; AARCH64-NEXT: fminnm v3.8h, v3.8h, v3.8h
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v1.8h
-; AARCH64-NEXT: fminnm v0.8h, v0.8h, v2.8h
-; AARCH64-NEXT: fminnm v1.8h, v1.8h, v3.8h
+; AARCH64-NEXT: mov h4, v2.h[1]
+; AARCH64-NEXT: mov h5, v0.h[1]
+; AARCH64-NEXT: mov h6, v3.h[1]
+; AARCH64-NEXT: mov h7, v1.h[1]
+; AARCH64-NEXT: fminnm h16, h2, h2
+; AARCH64-NEXT: fminnm h17, h0, h0
+; AARCH64-NEXT: mov h18, v2.h[2]
+; AARCH64-NEXT: mov h19, v0.h[2]
+; AARCH64-NEXT: fminnm h20, h3, h3
+; AARCH64-NEXT: fminnm h22, h1, h1
+; AARCH64-NEXT: mov h23, v3.h[2]
+; AARCH64-NEXT: mov h24, v1.h[2]
+; AARCH64-NEXT: fminnm h21, h4, h4
+; AARCH64-NEXT: fminnm h5, h5, h5
+; AARCH64-NEXT: fminnm h6, h6, h6
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fminnm h4, h17, h16
+; AARCH64-NEXT: fminnm h16, h18, h18
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h19, v2.h[3]
+; AARCH64-NEXT: fminnm h18, h5, h21
+; AARCH64-NEXT: mov h21, v0.h[3]
+; AARCH64-NEXT: fminnm h5, h22, h20
+; AARCH64-NEXT: fminnm h6, h7, h6
+; AARCH64-NEXT: fminnm h7, h23, h23
+; AARCH64-NEXT: fminnm h20, h24, h24
+; AARCH64-NEXT: mov h22, v3.h[3]
+; AARCH64-NEXT: mov h23, v1.h[3]
+; AARCH64-NEXT: fminnm h16, h17, h16
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h19, v2.h[4]
+; AARCH64-NEXT: mov v4.h[1], v18.h[0]
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h21, v0.h[4]
+; AARCH64-NEXT: mov v5.h[1], v6.h[0]
+; AARCH64-NEXT: fminnm h6, h20, h7
+; AARCH64-NEXT: fminnm h7, h22, h22
+; AARCH64-NEXT: fminnm h20, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[4]
+; AARCH64-NEXT: mov h23, v1.h[4]
+; AARCH64-NEXT: mov v4.h[2], v16.h[0]
+; AARCH64-NEXT: fminnm h16, h18, h17
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h19, v2.h[5]
+; AARCH64-NEXT: mov h21, v0.h[5]
+; AARCH64-NEXT: mov v5.h[2], v6.h[0]
+; AARCH64-NEXT: fminnm h6, h20, h7
+; AARCH64-NEXT: fminnm h7, h22, h22
+; AARCH64-NEXT: fminnm h20, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[5]
+; AARCH64-NEXT: mov h23, v1.h[5]
+; AARCH64-NEXT: mov v4.h[3], v16.h[0]
+; AARCH64-NEXT: fminnm h16, h18, h17
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: fminnm h18, h21, h21
+; AARCH64-NEXT: mov h19, v2.h[6]
+; AARCH64-NEXT: mov v5.h[3], v6.h[0]
+; AARCH64-NEXT: mov h2, v2.h[7]
+; AARCH64-NEXT: fminnm h6, h20, h7
+; AARCH64-NEXT: mov h7, v0.h[6]
+; AARCH64-NEXT: fminnm h20, h22, h22
+; AARCH64-NEXT: fminnm h21, h23, h23
+; AARCH64-NEXT: mov h22, v3.h[6]
+; AARCH64-NEXT: mov h23, v1.h[6]
+; AARCH64-NEXT: mov v4.h[4], v16.h[0]
+; AARCH64-NEXT: fminnm h16, h18, h17
+; AARCH64-NEXT: mov h0, v0.h[7]
+; AARCH64-NEXT: fminnm h17, h19, h19
+; AARCH64-NEXT: mov h3, v3.h[7]
+; AARCH64-NEXT: mov h1, v1.h[7]
+; AARCH64-NEXT: mov v5.h[4], v6.h[0]
+; AARCH64-NEXT: fminnm h7, h7, h7
+; AARCH64-NEXT: fminnm h2, h2, h2
+; AARCH64-NEXT: fminnm h6, h21, h20
+; AARCH64-NEXT: fminnm h18, h22, h22
+; AARCH64-NEXT: fminnm h19, h23, h23
+; AARCH64-NEXT: mov v4.h[5], v16.h[0]
+; AARCH64-NEXT: fminnm h0, h0, h0
+; AARCH64-NEXT: fminnm h3, h3, h3
+; AARCH64-NEXT: fminnm h1, h1, h1
+; AARCH64-NEXT: fminnm h7, h7, h17
+; AARCH64-NEXT: mov v5.h[5], v6.h[0]
+; AARCH64-NEXT: fminnm h6, h19, h18
+; AARCH64-NEXT: fminnm h0, h0, h2
+; AARCH64-NEXT: fminnm h1, h1, h3
+; AARCH64-NEXT: mov v4.h[6], v7.h[0]
+; AARCH64-NEXT: mov v5.h[6], v6.h[0]
+; AARCH64-NEXT: mov v4.h[7], v0.h[0]
+; AARCH64-NEXT: mov v5.h[7], v1.h[0]
+; AARCH64-NEXT: mov v0.16b, v4.16b
+; AARCH64-NEXT: mov v1.16b, v5.16b
; AARCH64-NEXT: ret
entry:
%c = call <16 x half> @llvm.minimumnum.v16f16(<16 x half> %a, <16 x half> %b)
diff --git a/llvm/test/CodeGen/AMDGPU/maximumnum.ll b/llvm/test/CodeGen/AMDGPU/maximumnum.ll
index 5e46fd6b28d275..635228334574ea 100644
--- a/llvm/test/CodeGen/AMDGPU/maximumnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/maximumnum.ll
@@ -1751,26 +1751,42 @@ define <2 x half> @v_maximumnum_v2f16(<2 x half> %x, <2 x half> %y) {
; GFX9-LABEL: v_maximumnum_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX9-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v3, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f16_e32 v2, v3, v2
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v2f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX10-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v3, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v2, v3, v2
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v2f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v2f16:
@@ -1780,10 +1796,18 @@ define <2 x half> @v_maximumnum_v2f16(<2 x half> %x, <2 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v1
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %x, <2 x half> %y)
ret <2 x half> %result
@@ -1801,19 +1825,28 @@ define <2 x half> @v_maximumnum_v2f16_nnan(<2 x half> %x, <2 x half> %y) {
; GFX9-LABEL: v_maximumnum_v2f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX9-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v2f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX10-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v2f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v2, v3, v2
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v2f16_nnan:
@@ -1823,7 +1856,12 @@ define <2 x half> @v_maximumnum_v2f16_nnan(<2 x half> %x, <2 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v3, v2
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <2 x half> @llvm.maximumnum.v2f16(<2 x half> %x, <2 x half> %y)
ret <2 x half> %result
@@ -1838,45 +1876,60 @@ define <3 x half> @v_maximumnum_v3f16(<3 x half> %x, <3 x half> %y) {
; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
; GFX8-NEXT: v_max_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_max_f16_e32 v1, v1, v3
; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_max_f16_e32 v1, v1, v2
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v3f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v2
+; GFX9-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f16_e32 v4, v5, v4
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v3f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v4, v5, v4
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v3f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f16_e32 v2, v5, v4
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v3f16:
@@ -1886,13 +1939,20 @@ define <3 x half> @v_maximumnum_v3f16(<3 x half> %x, <3 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v5, v4
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <3 x half> @llvm.maximumnum.v3f16(<3 x half> %x, <3 x half> %y)
ret <3 x half> %result
@@ -1904,29 +1964,38 @@ define <3 x half> @v_maximumnum_v3f16_nnan(<3 x half> %x, <3 x half> %y) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v3f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v3f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v3f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v2, v5, v4
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v3f16_nnan:
@@ -1936,8 +2005,13 @@ define <3 x half> @v_maximumnum_v3f16_nnan(<3 x half> %x, <3 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v5, v4
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <3 x half> @llvm.maximumnum.v3f16(<3 x half> %x, <3 x half> %y)
ret <3 x half> %result
@@ -1947,54 +2021,83 @@ define <4 x half> @v_maximumnum_v4f16(<4 x half> %x, <4 x half> %y) {
; GFX8-LABEL: v_maximumnum_v4f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_max_f16_sdwa v2, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v6, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_max_f16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_max_f16_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v4f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v2
+; GFX9-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v4, v5, v4
+; GFX9-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v6, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f16_e32 v5, v6, v5
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v4f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v6, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v7, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v4, v5, v4
+; GFX10-NEXT: v_max_f16_e32 v5, v7, v6
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v4f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v5, v6, v5
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_max_f16_e32 v2, v7, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v4f16:
@@ -2004,13 +2107,26 @@ define <4 x half> @v_maximumnum_v4f16(<4 x half> %x, <4 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v5, v6, v5
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_max_num_f16_e32 v2, v7, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x half> @llvm.maximumnum.v4f16(<4 x half> %x, <4 x half> %y)
ret <4 x half> %result
@@ -2020,33 +2136,51 @@ define <4 x half> @v_maximumnum_v4f16_nnan(<4 x half> %x, <4 x half> %y) {
; GFX8-LABEL: v_maximumnum_v4f16_nnan:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_max_f16_sdwa v2, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v4f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v4f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v4f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v2, v6, v5
+; GFX11-NEXT: v_max_f16_e32 v3, v7, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v4f16_nnan:
@@ -2056,8 +2190,18 @@ define <4 x half> @v_maximumnum_v4f16_nnan(<4 x half> %x, <4 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v6, v5
+; GFX12-NEXT: v_max_num_f16_e32 v3, v7, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v3
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <4 x half> @llvm.maximumnum.v4f16(<4 x half> %x, <4 x half> %y)
ret <4 x half> %result
@@ -2067,70 +2211,113 @@ define <6 x half> @v_maximumnum_v6f16(<6 x half> %x, <6 x half> %y) {
; GFX8-LABEL: v_maximumnum_v6f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v6, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v7, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v6
-; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v6, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v7, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v7, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_max_f16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v8, v9, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v5
; GFX8-NEXT: v_max_f16_e32 v1, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
-; GFX8-NEXT: v_max_f16_sdwa v3, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v4, v5, v5
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v4
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v7
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v6
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v6f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v3
-; GFX9-NEXT: v_pk_max_f16 v3, v4, v4
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX9-NEXT: v_pk_max_f16 v3, v5, v5
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v3
+; GFX9-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v6, v7, v6
+; GFX9-NEXT: v_max_f16_sdwa v7, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v7, v8, v7
+; GFX9-NEXT: v_max_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f16_e32 v8, v9, v8
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v5
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v4
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX9-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v6f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v3
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v4
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v5
+; GFX10-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v10, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f16_e32 v6, v7, v6
+; GFX10-NEXT: v_max_f16_sdwa v7, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v8, v9, v8
+; GFX10-NEXT: v_max_f16_e32 v7, v10, v7
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v4
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v5
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v7
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v8
+; GFX10-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v6f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v4
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v3
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v6, v7, v6
+; GFX11-NEXT: v_max_f16_e32 v7, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v9, v10, v10
+; GFX11-NEXT: v_max_f16_e32 v10, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v8
+; GFX11-NEXT: v_max_f16_e32 v8, v10, v9
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v3
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v4
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v6f16:
@@ -2140,16 +2327,35 @@ define <6 x half> @v_maximumnum_v6f16(<6 x half> %x, <6 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v4, v4
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v5, v5, v5
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v4
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v3
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v6, v7, v6
+; GFX12-NEXT: v_max_num_f16_e32 v7, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v9, v10, v10
+; GFX12-NEXT: v_max_num_f16_e32 v10, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v8
+; GFX12-NEXT: v_max_num_f16_e32 v8, v10, v9
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v3
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v4
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <6 x half> @llvm.maximumnum.v6f16(<6 x half> %x, <6 x half> %y)
ret <6 x half> %result
@@ -2159,86 +2365,143 @@ define <8 x half> @v_maximumnum_v8f16(<8 x half> %x, <8 x half> %y) {
; GFX8-LABEL: v_maximumnum_v8f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_sdwa v8, v9, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
-; GFX8-NEXT: v_max_f16_sdwa v4, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v10, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v10, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v10, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v11, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v10, v11, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v5, v5, v5
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_max_f16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v11, v12, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v7
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v6
; GFX8-NEXT: v_max_f16_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v5
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
-; GFX8-NEXT: v_max_f16_sdwa v4, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v7, v7
-; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX8-NEXT: v_max_f16_e32 v3, v3, v5
-; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v11
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v10
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v9
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v8
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maximumnum_v8f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v5, v5
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v3, v3, v4
+; GFX9-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v8, v9, v8
+; GFX9-NEXT: v_max_f16_sdwa v9, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v10, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v9, v10, v9
+; GFX9-NEXT: v_max_f16_sdwa v10, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v11, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v10, v11, v10
+; GFX9-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f16_e32 v11, v12, v11
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v7
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v5
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX9-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX9-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maximumnum_v8f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v6, v6, v6
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v7, v7, v7
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX10-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v10, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v11, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v12, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v13, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v8, v9, v8
+; GFX10-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX10-NEXT: v_max_f16_e32 v9, v11, v10
+; GFX10-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v10, v13, v12
+; GFX10-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX10-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v11, v12, v11
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v5
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v7
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX10-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX10-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maximumnum_v8f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v6, v6, v6
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v7, v7, v7
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v9, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v10, v10, v10
+; GFX11-NEXT: v_max_f16_e32 v11, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v12, v12, v12
+; GFX11-NEXT: v_max_f16_e32 v13, v13, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v8, v9, v8
+; GFX11-NEXT: v_max_f16_e32 v9, v11, v10
+; GFX11-NEXT: v_max_f16_e32 v10, v13, v12
+; GFX11-NEXT: v_max_f16_e32 v11, v14, v14
+; GFX11-NEXT: v_max_f16_e32 v12, v15, v15
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v11, v12, v11
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v4
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v5
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX11-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_maximumnum_v8f16:
@@ -2248,19 +2511,44 @@ define <8 x half> @v_maximumnum_v8f16(<8 x half> %x, <8 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v4, v4
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v5, v5, v5
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v6, v6, v6
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v7, v7, v7
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v4
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v5
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v9, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v10, v10, v10
+; GFX12-NEXT: v_max_num_f16_e32 v11, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v12, v12, v12
+; GFX12-NEXT: v_max_num_f16_e32 v13, v13, v13
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v8, v9, v8
+; GFX12-NEXT: v_max_num_f16_e32 v9, v11, v10
+; GFX12-NEXT: v_max_num_f16_e32 v10, v13, v12
+; GFX12-NEXT: v_max_num_f16_e32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f16_e32 v12, v15, v15
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v11, v12, v11
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v4
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v5
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX12-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <8 x half> @llvm.maximumnum.v8f16(<8 x half> %x, <8 x half> %y)
ret <8 x half> %result
diff --git a/llvm/test/CodeGen/AMDGPU/minimumnum.ll b/llvm/test/CodeGen/AMDGPU/minimumnum.ll
index 9e0b7daf38de16..cc445da9f22ba2 100644
--- a/llvm/test/CodeGen/AMDGPU/minimumnum.ll
+++ b/llvm/test/CodeGen/AMDGPU/minimumnum.ll
@@ -1705,26 +1705,42 @@ define <2 x half> @v_minimumnum_v2f16(<2 x half> %x, <2 x half> %y) {
; GFX9-LABEL: v_minimumnum_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX9-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v3, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_min_f16_e32 v2, v3, v2
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v2f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX10-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v3, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_min_f16_e32 v2, v3, v2
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v2f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_min_f16_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v2f16:
@@ -1734,10 +1750,18 @@ define <2 x half> @v_minimumnum_v2f16(<2 x half> %x, <2 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_min_num_f16_e32 v2, v3, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v1
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %x, <2 x half> %y)
ret <2 x half> %result
@@ -1755,19 +1779,28 @@ define <2 x half> @v_minimumnum_v2f16_nnan(<2 x half> %x, <2 x half> %y) {
; GFX9-LABEL: v_minimumnum_v2f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX9-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v2f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX10-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v2f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v2, v3, v2
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v2f16_nnan:
@@ -1777,7 +1810,12 @@ define <2 x half> @v_minimumnum_v2f16_nnan(<2 x half> %x, <2 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v2, v3, v2
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <2 x half> @llvm.minimumnum.v2f16(<2 x half> %x, <2 x half> %y)
ret <2 x half> %result
@@ -1792,45 +1830,60 @@ define <3 x half> @v_minimumnum_v3f16(<3 x half> %x, <3 x half> %y) {
; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
; GFX8-NEXT: v_min_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_max_f16_e32 v2, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_min_f16_e32 v1, v1, v3
; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_min_f16_e32 v1, v1, v2
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v3f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v2
+; GFX9-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_min_f16_e32 v4, v5, v4
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v3f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_min_f16_e32 v4, v5, v4
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v3f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f16_e32 v2, v5, v4
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v3f16:
@@ -1840,13 +1893,20 @@ define <3 x half> @v_minimumnum_v3f16(<3 x half> %x, <3 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_min_num_f16_e32 v2, v5, v4
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <3 x half> @llvm.minimumnum.v3f16(<3 x half> %x, <3 x half> %y)
ret <3 x half> %result
@@ -1858,29 +1918,38 @@ define <3 x half> @v_minimumnum_v3f16_nnan(<3 x half> %x, <3 x half> %y) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v3f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX9-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v3f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX10-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v3f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v2, v5, v4
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v3f16_nnan:
@@ -1890,8 +1959,13 @@ define <3 x half> @v_minimumnum_v3f16_nnan(<3 x half> %x, <3 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v2, v5, v4
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <3 x half> @llvm.minimumnum.v3f16(<3 x half> %x, <3 x half> %y)
ret <3 x half> %result
@@ -1901,54 +1975,83 @@ define <4 x half> @v_minimumnum_v4f16(<4 x half> %x, <4 x half> %y) {
; GFX8-LABEL: v_minimumnum_v4f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_min_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_max_f16_sdwa v2, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v6, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_min_f16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_min_f16_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_min_f16_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v4f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_max_f16 v2, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v2
+; GFX9-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v4, v5, v4
+; GFX9-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v6, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_min_f16_e32 v5, v6, v5
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v4f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX10-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v6, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v7, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_min_f16_e32 v4, v5, v4
+; GFX10-NEXT: v_min_f16_e32 v5, v7, v6
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v4f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_min_f16_e32 v5, v6, v5
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_min_f16_e32 v2, v7, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v4f16:
@@ -1958,13 +2061,26 @@ define <4 x half> @v_minimumnum_v4f16(<4 x half> %x, <4 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_min_num_f16_e32 v5, v6, v5
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_min_num_f16_e32 v2, v7, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x half> @llvm.minimumnum.v4f16(<4 x half> %x, <4 x half> %y)
ret <4 x half> %result
@@ -1974,33 +2090,51 @@ define <4 x half> @v_minimumnum_v4f16_nnan(<4 x half> %x, <4 x half> %y) {
; GFX8-LABEL: v_minimumnum_v4f16_nnan:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
-; GFX8-NEXT: v_min_f16_sdwa v2, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_min_f16_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v4f16_nnan:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX9-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v4f16_nnan:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX10-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v5
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v4f16_nnan:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_f16_e32 v2, v6, v5
+; GFX11-NEXT: v_min_f16_e32 v3, v7, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v2
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v4f16_nnan:
@@ -2010,8 +2144,18 @@ define <4 x half> @v_minimumnum_v4f16_nnan(<4 x half> %x, <4 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_num_f16_e32 v2, v6, v5
+; GFX12-NEXT: v_min_num_f16_e32 v3, v7, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v2
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v3
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call nnan <4 x half> @llvm.minimumnum.v4f16(<4 x half> %x, <4 x half> %y)
ret <4 x half> %result
@@ -2021,70 +2165,113 @@ define <6 x half> @v_minimumnum_v6f16(<6 x half> %x, <6 x half> %y) {
; GFX8-LABEL: v_minimumnum_v6f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v6, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v7, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_min_f16_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v0, v0, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v6
-; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v6, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v7, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_sdwa v7, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_min_f16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_min_f16_sdwa v8, v9, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f16_e32 v2, v2, v5
; GFX8-NEXT: v_min_f16_e32 v1, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
-; GFX8-NEXT: v_max_f16_sdwa v3, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_min_f16_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v4, v5, v5
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_min_f16_e32 v2, v2, v4
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v7
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v6
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v6f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v3
-; GFX9-NEXT: v_pk_max_f16 v3, v4, v4
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX9-NEXT: v_pk_max_f16 v3, v5, v5
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_min_f16 v2, v2, v3
+; GFX9-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v6, v7, v6
+; GFX9-NEXT: v_max_f16_sdwa v7, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v7, v8, v7
+; GFX9-NEXT: v_max_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_min_f16_e32 v8, v9, v8
+; GFX9-NEXT: v_min_f16_e32 v2, v2, v5
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v4
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX9-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v6f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v3
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v4
-; GFX10-NEXT: v_pk_min_f16 v2, v2, v5
+; GFX10-NEXT: v_max_f16_sdwa v6, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v7, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v10, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX10-NEXT: v_min_f16_e32 v6, v7, v6
+; GFX10-NEXT: v_max_f16_sdwa v7, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_min_f16_e32 v8, v9, v8
+; GFX10-NEXT: v_min_f16_e32 v7, v10, v7
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v4
+; GFX10-NEXT: v_min_f16_e32 v2, v2, v5
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v7
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v8
+; GFX10-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v6f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v4
-; GFX11-NEXT: v_pk_min_f16 v2, v2, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v3
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_min_f16_e32 v6, v7, v6
+; GFX11-NEXT: v_max_f16_e32 v7, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v9, v10, v10
+; GFX11-NEXT: v_max_f16_e32 v10, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_min_f16_e32 v7, v7, v8
+; GFX11-NEXT: v_min_f16_e32 v8, v10, v9
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v3
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v4
+; GFX11-NEXT: v_min_f16_e32 v2, v2, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v6f16:
@@ -2094,16 +2281,35 @@ define <6 x half> @v_minimumnum_v6f16(<6 x half> %x, <6 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v4, v4
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v5, v5, v5
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v3
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v4
-; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v3
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_min_num_f16_e32 v6, v7, v6
+; GFX12-NEXT: v_max_num_f16_e32 v7, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v9, v10, v10
+; GFX12-NEXT: v_max_num_f16_e32 v10, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_min_num_f16_e32 v7, v7, v8
+; GFX12-NEXT: v_min_num_f16_e32 v8, v10, v9
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v3
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v4
+; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v8
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-NEXT: v_pack_b32_f16 v2, v2, v6
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <6 x half> @llvm.minimumnum.v6f16(<6 x half> %x, <6 x half> %y)
ret <6 x half> %result
@@ -2113,86 +2319,143 @@ define <8 x half> @v_minimumnum_v8f16(<8 x half> %x, <8 x half> %y) {
; GFX8-LABEL: v_minimumnum_v8f16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v9, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_min_f16_sdwa v8, v9, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
-; GFX8-NEXT: v_max_f16_sdwa v4, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v9, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v10, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_sdwa v9, v10, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v10, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v11, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_sdwa v10, v11, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
; GFX8-NEXT: v_max_f16_e32 v5, v5, v5
; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
-; GFX8-NEXT: v_min_f16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX8-NEXT: v_min_f16_sdwa v11, v12, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f16_e32 v3, v3, v7
+; GFX8-NEXT: v_min_f16_e32 v2, v2, v6
; GFX8-NEXT: v_min_f16_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_min_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_max_f16_e32 v2, v2, v2
-; GFX8-NEXT: v_min_f16_e32 v2, v2, v5
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
-; GFX8-NEXT: v_max_f16_sdwa v4, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_sdwa v5, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_min_f16_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v7, v7
-; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
-; GFX8-NEXT: v_min_f16_e32 v3, v3, v5
-; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v11
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v10
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v9
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v8
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minimumnum_v8f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX9-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX9-NEXT: v_pk_min_f16 v0, v0, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v5, v5
-; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX9-NEXT: v_pk_min_f16 v1, v1, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX9-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX9-NEXT: v_pk_min_f16 v2, v2, v4
-; GFX9-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX9-NEXT: v_pk_min_f16 v3, v3, v4
+; GFX9-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v8, v9, v8
+; GFX9-NEXT: v_max_f16_sdwa v9, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v10, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v9, v10, v9
+; GFX9-NEXT: v_max_f16_sdwa v10, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v11, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v10, v11, v10
+; GFX9-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX9-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX9-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX9-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX9-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX9-NEXT: v_min_f16_e32 v11, v12, v11
+; GFX9-NEXT: v_min_f16_e32 v3, v3, v7
+; GFX9-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX9-NEXT: v_min_f16_e32 v1, v1, v5
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX9-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX9-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX9-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minimumnum_v8f16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX10-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX10-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX10-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX10-NEXT: v_pk_max_f16 v6, v6, v6
-; GFX10-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v7, v7, v7
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX10-NEXT: v_pk_min_f16 v0, v0, v4
-; GFX10-NEXT: v_pk_min_f16 v1, v1, v5
-; GFX10-NEXT: v_pk_min_f16 v2, v2, v6
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v7
+; GFX10-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v10, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v11, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v12, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v13, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v8, v9, v8
+; GFX10-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX10-NEXT: v_min_f16_e32 v9, v11, v10
+; GFX10-NEXT: v_max_f16_sdwa v11, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v10, v13, v12
+; GFX10-NEXT: v_max_f16_sdwa v12, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX10-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v11, v12, v11
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_min_f16_e32 v1, v1, v5
+; GFX10-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX10-NEXT: v_min_f16_e32 v3, v3, v7
+; GFX10-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX10-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX10-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX10-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minimumnum_v8f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v4, v4
-; GFX11-NEXT: v_pk_max_f16 v0, v0, v0
-; GFX11-NEXT: v_pk_max_f16 v5, v5, v5
-; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
-; GFX11-NEXT: v_pk_max_f16 v6, v6, v6
-; GFX11-NEXT: v_pk_max_f16 v2, v2, v2
-; GFX11-NEXT: v_pk_max_f16 v7, v7, v7
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
-; GFX11-NEXT: v_pk_min_f16 v0, v0, v4
-; GFX11-NEXT: v_pk_min_f16 v1, v1, v5
-; GFX11-NEXT: v_pk_min_f16 v2, v2, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v9, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v10, v10, v10
+; GFX11-NEXT: v_max_f16_e32 v11, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v12, v12, v12
+; GFX11-NEXT: v_max_f16_e32 v13, v13, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: v_min_f16_e32 v8, v9, v8
+; GFX11-NEXT: v_min_f16_e32 v9, v11, v10
+; GFX11-NEXT: v_min_f16_e32 v10, v13, v12
+; GFX11-NEXT: v_max_f16_e32 v11, v14, v14
+; GFX11-NEXT: v_max_f16_e32 v12, v15, v15
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_min_f16_e32 v11, v12, v11
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v4
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v5
+; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-NEXT: v_min_f16_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX11-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX11-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: v_minimumnum_v8f16:
@@ -2202,19 +2465,44 @@ define <8 x half> @v_minimumnum_v8f16(<8 x half> %x, <8 x half> %y) {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v4, v4
-; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v0
-; GFX12-NEXT: v_pk_max_num_f16 v5, v5, v5
-; GFX12-NEXT: v_pk_max_num_f16 v1, v1, v1
-; GFX12-NEXT: v_pk_max_num_f16 v6, v6, v6
-; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v2
-; GFX12-NEXT: v_pk_max_num_f16 v7, v7, v7
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v3
-; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v4
-; GFX12-NEXT: v_pk_min_num_f16 v1, v1, v5
-; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v6
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v9, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v10, v10, v10
+; GFX12-NEXT: v_max_num_f16_e32 v11, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v12, v12, v12
+; GFX12-NEXT: v_max_num_f16_e32 v13, v13, v13
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v0
+; GFX12-NEXT: v_min_num_f16_e32 v8, v9, v8
+; GFX12-NEXT: v_min_num_f16_e32 v9, v11, v10
+; GFX12-NEXT: v_min_num_f16_e32 v10, v13, v12
+; GFX12-NEXT: v_max_num_f16_e32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f16_e32 v12, v15, v15
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_min_num_f16_e32 v11, v12, v11
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v4
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v5
+; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pack_b32_f16 v0, v0, v11
+; GFX12-NEXT: v_pack_b32_f16 v1, v1, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pack_b32_f16 v2, v2, v9
+; GFX12-NEXT: v_pack_b32_f16 v3, v3, v8
; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = call <8 x half> @llvm.minimumnum.v8f16(<8 x half> %x, <8 x half> %y)
ret <8 x half> %result