[llvm] [AMDGPU][GlobalISel] Enable vector reductions (PR #131413)
Tim Gymnich via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 14 18:30:35 PDT 2025
https://github.com/tgymnich created https://github.com/llvm/llvm-project/pull/131413
fixes https://github.com/llvm/llvm-project/issues/114816
>From 051a1f65eb61b4a724cb8963b564b5a4b94d91d3 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 14 Mar 2025 09:51:26 +0000
Subject: [PATCH 1/3] [AMDGPU][GlobalISel] enable vector reductions
---
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index b3a8183beeacf..fc2d4954df8c0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -350,6 +350,12 @@ static std::initializer_list<LLT> AllS32Vectors = {
static std::initializer_list<LLT> AllS64Vectors = {V2S64, V3S64, V4S64, V5S64,
V6S64, V7S64, V8S64, V16S64};
+static std::initializer_list<LLT> AllVectors{
+ V2S16, V4S16, V6S16, V8S16, V10S16, V12S16, V16S16, V2S128,
+ V4S128, V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
+ V9S32, V10S32, V11S32, V12S32, V16S32, V32S32, V2S64, V3S64,
+ V4S64, V5S64, V6S64, V7S64, V8S64, V16S64};
+
// Checks whether a type is in the list of legal register types.
static bool isRegisterClassType(const GCNSubtarget &ST, LLT Ty) {
if (Ty.isPointerOrPointerVector())
@@ -2090,7 +2096,6 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.clampMaxNumElements(0, S16, 2)
.scalarize(0);
} else {
- // TODO: Implement
getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();
}
@@ -2106,6 +2111,15 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
getActionDefinitionsBuilder(G_PREFETCH).alwaysLegal();
+ getActionDefinitionsBuilder(
+ {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX,
+ G_VECREDUCE_ADD, G_VECREDUCE_MUL, G_VECREDUCE_FMUL, G_VECREDUCE_FMIN,
+ G_VECREDUCE_FMAX, G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM,
+ G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
+ .legalFor(AllVectors)
+ .scalarize(1)
+ .lower();
+
getLegacyLegalizerInfo().computeTables();
verify(*ST.getInstrInfo());
}
>From 4a396b233215069770b6b96446c03508679e7840 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 14 Mar 2025 19:40:42 +0100
Subject: [PATCH 2/3] lower fminimum/fmaximum
---
.../llvm/CodeGen/GlobalISel/LegalizerHelper.h | 1 +
llvm/include/llvm/CodeGen/GlobalISel/Utils.h | 2 +
.../CodeGen/GlobalISel/LegalizerHelper.cpp | 61 +++++++++++++++++++
llvm/lib/CodeGen/GlobalISel/Utils.cpp | 10 +++
4 files changed, 74 insertions(+)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 4e18f5cc913a7..3b02b23dcd7b9 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -426,6 +426,7 @@ class LegalizerHelper {
LegalizeResult lowerMinMax(MachineInstr &MI);
LegalizeResult lowerFCopySign(MachineInstr &MI);
LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
+ LegalizeResult lowerFMinimum_FMaximum(MachineInstr &MI);
LegalizeResult lowerFMad(MachineInstr &MI);
LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
LegalizeResult lowerFFloor(MachineInstr &MI);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index a35ecae5d18bf..ad9ca2ad26f6a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -343,6 +343,8 @@ inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
return isKnownNeverNaN(Val, MRI, true);
}
+bool isKnownNeverZeroFloat(Register Val, const MachineRegisterInfo &MRI);
+
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
/// Return a virtual register corresponding to the incoming argument register \p
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index ed8bd25698c03..b20a79a24d70c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
@@ -32,6 +33,7 @@
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
@@ -4594,6 +4596,9 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
case G_FMINNUM:
case G_FMAXNUM:
return lowerFMinNumMaxNum(MI);
+ case G_FMINIMUM:
+ case G_FMAXIMUM:
+ return lowerFMinimum_FMaximum(MI);
case G_MERGE_VALUES:
return lowerMergeValues(MI);
case G_UNMERGE_VALUES:
@@ -8165,6 +8170,62 @@ LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) {
return Legalized;
}
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerFMinimum_FMaximum(MachineInstr &MI) {
+ auto [Dst, Src0, Src1] = MI.getFirst3Regs();
+ LLT Ty = MRI.getType(Dst);
+ unsigned Opc = MI.getOpcode();
+ bool IsMax = Opc == TargetOpcode::G_FMAXIMUM;
+
+ Register MinMax;
+ unsigned CompOpcIeee = IsMax ? TargetOpcode::G_FMAXNUM_IEEE : TargetOpcode::G_FMINNUM_IEEE;
+ unsigned CompOpc = IsMax ? TargetOpcode::G_FMAXNUM : TargetOpcode::G_FMINNUM;
+ CmpInst::Predicate CompPred = IsMax ? CmpInst::FCMP_OGT : CmpInst::FCMP_OLT;
+ LLT S1 = LLT::scalar(1);
+ const fltSemantics &FPSem = getFltSemanticForLLT(Ty);
+
+ bool MinMaxMustRespectOrderedZero = false;
+
+ if (LI.isLegalOrCustom({CompOpcIeee, Ty})) {
+ MinMax = MIRBuilder.buildInstr(CompOpcIeee, {Ty}, {Src0, Src1}).getReg(0);
+ MinMaxMustRespectOrderedZero = true;
+ } else if (LI.isLegalOrCustom({CompOpc, Ty})) {
+ MinMax = MIRBuilder.buildInstr(CompOpc, {Ty}, {Src0, Src1}).getReg(0);
+ } else {
+ // Any NaN will be propagated later, so the orderedness of this compare does not matter.
+ auto Comp = MIRBuilder.buildFCmp(CompPred, S1, Src0, Src1);
+ MinMax = MIRBuilder.buildSelect(Ty, Comp,Src0, Src1).getReg(0);
+ }
+
+ // If either operand may be NaN, propagate a NaN result.
+ if (!MI.getFlag(MachineInstr::FmNoNans) && (!isKnownNeverNaN(Src0, MRI) || !isKnownNeverNaN(Src1, MRI))) {
+ auto FPNaN = MIRBuilder.buildFConstant(Ty, APFloat::getNaN(FPSem));
+ auto Comp = MIRBuilder.buildFCmp(CmpInst::Predicate::FCMP_UNO, S1, Src0, Src1);
+ MinMax = MIRBuilder.buildSelect(Ty, Comp, FPNaN, MinMax).getReg(0);
+ }
+
+ // fminimum/fmaximum require -0.0 to compare less than +0.0.
+ if (!MinMaxMustRespectOrderedZero && !MI.getFlag(MachineInstr::FmNsz) &&
+ !isKnownNeverZeroFloat(Src0, MRI) && !isKnownNeverZeroFloat(Src1, MRI)) {
+ auto Zero = MIRBuilder.buildFConstant(Ty, APFloat::getZero(FPSem));
+ auto IsZero = MIRBuilder.buildFCmp(CmpInst::Predicate::FCMP_OEQ, S1,MinMax, Zero);
+
+ unsigned TestZeroMask = IsMax ? fcPosZero : fcNegZero;
+
+ auto Src0Zero = MIRBuilder.buildIsFPClass(S1, Src0, TestZeroMask);
+ auto Src0Comp = MIRBuilder.buildSelect(Ty, Src0Zero, Src0, MinMax);
+
+ auto Src1Zero = MIRBuilder.buildIsFPClass(S1, Src1, TestZeroMask);
+ auto Src1Comp = MIRBuilder.buildSelect(Ty, Src1Zero, Src1, Src0Comp);
+
+ MinMax = MIRBuilder.buildSelect(Ty, IsZero, Src1Comp, MinMax).getReg(0);
+ }
+
+ MRI.replaceRegWith(Dst, MinMax);
+ MI.eraseFromParent();
+ return Legalized;
+}
+
LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
// Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c
Register DstReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 625d556e3ff5e..59764fc74e928 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -890,6 +890,16 @@ bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
return false;
}
+bool llvm::isKnownNeverZeroFloat(Register Reg, const MachineRegisterInfo &MRI) {
+ std::optional<FPValueAndVReg> FPValReg;
+ if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {
+ if (!FPValReg->Value.isZero())
+ return true;
+ }
+
+ return false;
+}
+
Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
const MachinePointerInfo &MPO) {
auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
>From b9b659cc45196ed76bed6abff4ad27f4d74b1ae2 Mon Sep 17 00:00:00 2001
From: Tim Gymnich <tim at gymni.ch>
Date: Fri, 14 Mar 2025 11:17:00 +0000
Subject: [PATCH 3/3] add tests
---
.../GlobalISel/llvm-intrinsics/reduce/add.ll | 2388 +++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/and.ll | 2352 +++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/fadd.ll | 1461 ++++++++
.../GlobalISel/llvm-intrinsics/reduce/fmax.ll | 1806 ++++++++++
.../llvm-intrinsics/reduce/fmaximum.ll | 2671 ++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/fmin.ll | 1805 ++++++++++
.../llvm-intrinsics/reduce/fminimum.ll | 2675 ++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/fmul.ll | 1461 ++++++++
.../GlobalISel/llvm-intrinsics/reduce/mul.ll | 2711 +++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/or.ll | 2347 +++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/smax.ll | 3098 +++++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/smin.ll | 3098 +++++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/umax.ll | 3031 ++++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/umin.ll | 2678 ++++++++++++++
.../GlobalISel/llvm-intrinsics/reduce/xor.ll | 2276 ++++++++++++
15 files changed, 35858 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/add.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/and.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fadd.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmax.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmaximum.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmin.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fminimum.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmul.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/mul.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/or.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smax.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smin.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umax.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umin.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/xor.ll
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/add.ll
new file mode 100644
index 0000000000000..76c97f4327aa3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/add.ll
@@ -0,0 +1,2388 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_add_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_add_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.add.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_add_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v6, v2, v3
+; GFX9-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_add_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v5
+; GFX8-NEXT: v_add_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v4
+; GFX8-NEXT: v_add_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_add_u16_e32 v1, v1, v5
+; GFX9-NEXT: v_add_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX10-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX10-NEXT: v_add_nc_u16 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_add_nc_u16 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_add_nc_u16 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_add_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_add_i32_e32 v7, vcc, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_add_u16_e32 v5, v5, v11
+; GFX8-NEXT: v_add_u16_e32 v4, v4, v10
+; GFX8-NEXT: v_add_u16_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_add_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX8-NEXT: v_add_u16_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v8
+; GFX8-NEXT: v_add_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v5
+; GFX8-NEXT: v_add_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v4
+; GFX8-NEXT: v_add_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_add_u16_e32 v5, v5, v11
+; GFX9-NEXT: v_add_u16_e32 v4, v4, v10
+; GFX9-NEXT: v_add_u16_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX9-NEXT: v_add_u16_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v8
+; GFX9-NEXT: v_add_u16_e32 v1, v1, v5
+; GFX9-NEXT: v_add_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_add_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v13, 16, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX10-NEXT: v_add_nc_u16 v4, v4, v12
+; GFX10-NEXT: v_add_nc_u16 v5, v5, v10
+; GFX10-NEXT: v_add_nc_u16 v6, v6, v13
+; GFX10-NEXT: v_add_nc_u16 v7, v7, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v8
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v8
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v7
+; GFX10-NEXT: v_add_nc_u16 v2, v2, v9
+; GFX10-NEXT: v_add_nc_u16 v3, v3, v10
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX10-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX10-NEXT: v_add_nc_u16 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX11-NEXT: v_add_nc_u16 v4, v4, v12
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_nc_u16 v5, v5, v13
+; GFX11-NEXT: v_add_nc_u16 v6, v6, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_nc_u16 v7, v7, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v8
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_nc_u16 v3, v3, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_add_nc_u16 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX12-NEXT: v_add_nc_u16 v4, v4, v12
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_nc_u16 v5, v5, v13
+; GFX12-NEXT: v_add_nc_u16 v6, v6, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_nc_u16 v7, v7, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v8
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_nc_u16 v3, v3, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v4
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_add_nc_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_add_nc_u16 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_nc_u16 v1, v1, v3
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_add_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_add_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX11-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v2
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.add.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_add_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: v_add_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_add_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v4, v0, v2
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v2, v1, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v2, v4, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: v_add_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_add_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, v5, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v10
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v11
+; GFX7-NEXT: v_add_i32_e32 v7, vcc, v7, v15
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v9
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v8, v0, v4
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v4, v1, v5
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v5, v2, v6
+; GFX8-NEXT: v_add_u16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_add_u16_e32 v5, v3, v7
+; GFX8-NEXT: v_add_u16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_add_u16_e32 v5, v8, v2
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v2, v4, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v2, v5, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: v_add_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v5
+; GFX9-NEXT: v_pk_add_u16 v2, v2, v6
+; GFX9-NEXT: v_pk_add_u16 v3, v3, v7
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v5
+; GFX10-NEXT: v_pk_add_u16 v2, v2, v6
+; GFX10-NEXT: v_pk_add_u16 v3, v3, v7
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX11-NEXT: v_pk_add_u16 v1, v1, v5
+; GFX11-NEXT: v_pk_add_u16 v2, v2, v6
+; GFX11-NEXT: v_pk_add_u16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX12-NEXT: v_pk_add_u16 v1, v1, v5
+; GFX12-NEXT: v_pk_add_u16 v2, v2, v6
+; GFX12-NEXT: v_pk_add_u16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_add_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_add_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add3_u32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_add_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_add_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v7
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v6
+; GFX9-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX9-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v6
+; GFX10-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX10-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX11-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v3, v3, v7
+; GFX12-NEXT: v_add_nc_u32_e32 v0, v0, v4
+; GFX12-NEXT: v_add_nc_u32_e32 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX12-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_add_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v9
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v10
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v11
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v12
+; GFX7-NEXT: v_add_i32_e32 v5, vcc, v5, v13
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v14
+; GFX7-NEXT: v_add_i32_e32 v7, vcc, v7, v15
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_add_i32_e32 v3, vcc, v3, v7
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v8
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v9
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v10
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v11
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v12
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v13
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v14
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v15
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v15
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v9
+; GFX9-NEXT: v_add_u32_e32 v4, v4, v12
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v13
+; GFX9-NEXT: v_add_u32_e32 v6, v6, v14
+; GFX9-NEXT: v_add3_u32 v3, v3, v11, v7
+; GFX9-NEXT: v_add3_u32 v0, v0, v8, v4
+; GFX9-NEXT: v_add3_u32 v2, v2, v10, v6
+; GFX9-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX9-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v7, v7, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v9
+; GFX10-NEXT: v_add_nc_u32_e32 v4, v4, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v5, v5, v13
+; GFX10-NEXT: v_add_nc_u32_e32 v6, v6, v14
+; GFX10-NEXT: v_add3_u32 v3, v3, v11, v7
+; GFX10-NEXT: v_add3_u32 v0, v0, v8, v4
+; GFX10-NEXT: v_add3_u32 v2, v2, v10, v6
+; GFX10-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX10-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v7, v7, v15
+; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v9
+; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v12
+; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v13
+; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v14
+; GFX11-NEXT: v_add3_u32 v3, v3, v11, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_add3_u32 v0, v0, v8, v4
+; GFX11-NEXT: v_add3_u32 v2, v2, v10, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX11-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v7, v7, v15
+; GFX12-NEXT: v_add_nc_u32_e32 v1, v1, v9
+; GFX12-NEXT: v_add_nc_u32_e32 v4, v4, v12
+; GFX12-NEXT: v_add_nc_u32_e32 v5, v5, v13
+; GFX12-NEXT: v_add_nc_u32_e32 v6, v6, v14
+; GFX12-NEXT: v_add3_u32 v3, v3, v11, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_add3_u32 v0, v0, v8, v4
+; GFX12-NEXT: v_add3_u32 v2, v2, v10, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add3_u32 v1, v1, v5, v3
+; GFX12-NEXT: v_add3_u32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_add_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_add_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.add.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_add_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_add_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v10
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v12
+; GFX7-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v14
+; GFX7-NEXT: v_addc_u32_e32 v7, vcc, v7, v15, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v8
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v10
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v12
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v14
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v15, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v8
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v10
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v11, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v12
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v13, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v14
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v15, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v6, v14
+; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v15, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, v14
+; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v15, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v6, vcc_lo, v6, v14
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v15, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_add_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_add_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v16
+; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v17, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v18
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v19, vcc
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v20
+; GFX7-NEXT: v_addc_u32_e32 v5, vcc, v5, v21, vcc
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v22
+; GFX7-NEXT: v_addc_u32_e32 v7, vcc, v7, v23, vcc
+; GFX7-NEXT: v_add_i32_e32 v8, vcc, v8, v24
+; GFX7-NEXT: v_addc_u32_e32 v9, vcc, v9, v25, vcc
+; GFX7-NEXT: v_add_i32_e32 v10, vcc, v10, v26
+; GFX7-NEXT: v_addc_u32_e32 v11, vcc, v11, v27, vcc
+; GFX7-NEXT: v_add_i32_e32 v12, vcc, v12, v28
+; GFX7-NEXT: v_addc_u32_e32 v13, vcc, v13, v29, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v10
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX7-NEXT: v_add_i32_e32 v4, vcc, v4, v12
+; GFX7-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX7-NEXT: v_add_i32_e32 v8, vcc, v14, v30
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_addc_u32_e32 v9, vcc, v15, v16, vcc
+; GFX7-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GFX7-NEXT: v_addc_u32_e32 v7, vcc, v7, v9, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_add_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v16
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v17, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v18
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v19, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v20
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v21, vcc
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v22
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v23, vcc
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24
+; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v9, v25, vcc
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v26
+; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v11, v27, vcc
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v28
+; GFX8-NEXT: v_addc_u32_e32 v13, vcc, v13, v29, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v8
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v10
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v12
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v14, v30
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v15, v16, vcc
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v8
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_add_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v16
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v17, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v18
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v19, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v20
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v21, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v22
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v23, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v8, v24
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v25, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v10, v26
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, v11, v27, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v12, v28
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v29, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v8
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v10
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v11, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v12
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v13, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v14, v30
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v15, v31, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v8
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_add_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v16
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v17, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v18
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v19, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v4, v20
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v21, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v6, v22
+; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v23, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v8, v24
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v9, v25, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v10, v26
+; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, v11, v27, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v12, vcc_lo, v12, v28
+; GFX10-NEXT: v_add_co_ci_u32_e32 v13, vcc_lo, v13, v29, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v14, v30
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v15, v31, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v6, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_add_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v16
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v17, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v18
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v19, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, v20
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v21, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, v22
+; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v23, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v8, v24
+; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v9, v25, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, v10, v26
+; GFX11-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, v11, v27, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, v12, v28
+; GFX11-NEXT: v_add_co_ci_u32_e32 v13, vcc_lo, v13, v29, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v14, v30
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v15, v31, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, v6, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_add_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v16
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v17, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v18
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v19, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, v4, v20
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v21, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v6, vcc_lo, v6, v22
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v23, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v8, vcc_lo, v8, v24
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v9, v25, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v10, vcc_lo, v10, v26
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, v11, v27, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v12, vcc_lo, v12, v28
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v13, vcc_lo, v13, v29, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v8
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v9, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v10
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v11, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v8, vcc_lo, v14, v30
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v15, v31, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v4, vcc_lo, v4, v12
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v13, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v6, vcc_lo, v6, v8
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v4
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v5, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v2, v6
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v7, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.add.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/and.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/and.ll
new file mode 100644
index 0000000000000..955dad87f0294
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/and.ll
@@ -0,0 +1,2352 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_and_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_and_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_and_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v5, v0, v4, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+; Checks lowering of @llvm.vector.reduce.and.v8i8. The expected ISA below
+; packs the i8 lanes into 32-bit words and reduces them with shift/and/or
+; sequences; these CHECK lines look machine-generated (presumably by
+; utils/update_llc_test_checks.py — regenerate rather than hand-edit).
+define i8 @test_vector_reduce_and_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  ; AND-reduce all eight i8 lanes down to a single i8 result.
+  %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
+  ret i8 %res
+}
+
+define i8 @test_vector_reduce_and_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX7-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_and_b32_e32 v5, v5, v11
+; GFX8-NEXT: v_and_b32_e32 v4, v4, v10
+; GFX8-NEXT: v_and_b32_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_sdwa v7, v7, v10 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v7
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_and_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_and_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_and_b32_e32 v5, v5, v11
+; GFX9-NEXT: v_and_b32_e32 v4, v4, v10
+; GFX9-NEXT: v_and_b32_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_sdwa v7, v7, v10 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v7
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_and_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_and_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX10-NEXT: v_and_b32_sdwa v6, v6, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_and_b32_sdwa v7, v7, v12 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX10-NEXT: v_and_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v8
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v7
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or3_b32 v8, v8, v10, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX11-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 24, v12
+; GFX11-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v7, v7, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_or3_b32 v8, v8, v10, v11
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 24, v12
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_and_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.and.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_and_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_and_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_mov_b32 s0, 0xffff
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_and_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v14
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX7-NEXT: v_or_b32_e32 v7, v7, v8
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v7
+; GFX8-NEXT: v_or_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX8-NEXT: v_or_b32_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX8-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v5, v1, v5
+; GFX9-NEXT: v_and_b32_e32 v1, v3, v7
+; GFX9-NEXT: s_mov_b32 s0, 0xffff
+; GFX9-NEXT: v_and_b32_e32 v4, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v0, v2, v6
+; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v1
+; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v0
+; GFX9-NEXT: v_and_b32_e32 v1, v5, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v4, v0
+; GFX9-NEXT: v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_and_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_and_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_and_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_and_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_and_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX9-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX9-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX9-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX10-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX10-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX11-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX11-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_and_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_and_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.and.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_and_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_and_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX9-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX9-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX9-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX10-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX10-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX11-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX11-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_and_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_and_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX7-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX7-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX7-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX7-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX7-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX7-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX7-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX7-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX7-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX7-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX7-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX7-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v2, v15, v16
+; GFX7-NEXT: v_and_b32_e32 v2, v7, v2
+; GFX7-NEXT: v_and_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_and_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX8-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX8-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX8-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX8-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX8-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX8-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX8-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX8-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX8-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX8-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX8-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX8-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v2, v15, v16
+; GFX8-NEXT: v_and_b32_e32 v2, v7, v2
+; GFX8-NEXT: v_and_b32_e32 v2, v3, v2
+; GFX8-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_and_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX9-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX9-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX9-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX9-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX9-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX9-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX9-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX9-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX9-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX9-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX9-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX9-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX9-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX9-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX9-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v2, v15, v31
+; GFX9-NEXT: v_and_b32_e32 v2, v7, v2
+; GFX9-NEXT: v_and_b32_e32 v2, v3, v2
+; GFX9-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_and_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX10-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX10-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX10-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX10-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX10-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX10-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX10-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX10-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX10-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX10-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX10-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX10-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v8, v15, v31
+; GFX10-NEXT: v_and_b32_e32 v7, v7, v8
+; GFX10-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_and_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX11-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX11-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX11-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX11-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX11-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX11-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX11-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX11-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX11-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX11-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX11-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX11-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v8, v15, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v7, v7, v8
+; GFX11-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_and_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v16
+; GFX12-NEXT: v_and_b32_e32 v8, v8, v24
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v17
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v18
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v19
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v20
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v21
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v22
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v23
+; GFX12-NEXT: v_and_b32_e32 v9, v9, v25
+; GFX12-NEXT: v_and_b32_e32 v10, v10, v26
+; GFX12-NEXT: v_and_b32_e32 v11, v11, v27
+; GFX12-NEXT: v_and_b32_e32 v12, v12, v28
+; GFX12-NEXT: v_and_b32_e32 v13, v13, v29
+; GFX12-NEXT: v_and_b32_e32 v14, v14, v30
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v8, v15, v31
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v8
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.and.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.and.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.and.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fadd.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fadd.ll
new file mode 100644
index 0000000000000..3f34d009ed2b7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fadd.ll
@@ -0,0 +1,1461 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define half @test_vector_reduce_fadd_v2half(half %sp, <2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fadd.v2half(half %sp, <2 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fadd_v3half(half %sp, <3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fadd.v3half(half %sp, <3 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fadd_v4half(half %sp, <4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fadd.v4half(half %sp, <4 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fadd_v8half(half %sp, <8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fadd.v8half(half %sp, <8 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fadd_v16half(half %sp, <16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v9
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v11
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v13
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v15
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v16
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v5
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v6
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v7
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v8
+; GFX8-NEXT: v_add_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v5
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v6
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v7
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_add_f16_e32 v0, v0, v8
+; GFX9-NEXT: v_add_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v5
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v6
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v7
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_add_f16_e32 v0, v0, v8
+; GFX10-NEXT: v_add_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v6
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v7
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v6
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v7
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fadd.v16half(half %sp, <16 x half> %v)
+ ret half %res
+}
+
+define float @test_vector_reduce_fadd_v2float(float %sp, <2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fadd.v2float(float %sp, <2 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fadd_v3float(float %sp, <3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fadd.v3float(float %sp, <3 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fadd_v4float(float %sp, <4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fadd.v4float(float %sp, <4 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fadd_v8float(float %sp, <8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fadd.v8float(float %sp, <8 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fadd_v16float(float %sp, <16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX7-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX8-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v4
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v8
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v9
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v10
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v12
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fadd.v16float(float %sp, <16 x float> %v)
+ ret float %res
+}
+
+
+define double @test_vector_reduce_fadd_v2double(double %sp, <2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fadd.v2double(double %sp, <2 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fadd_v3double(double %sp, <3 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v3double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v3double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v3double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v3double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v3double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v3double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fadd.v3double(double %sp, <3 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fadd_v4double(double %sp, <4 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v4double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v4double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v4double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v4double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v4double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v4double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fadd.v4double(double %sp, <4 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fadd_v8double(double %sp, <8 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v8double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v8double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v8double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v8double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v8double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v8double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[12:13]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[14:15]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[16:17]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fadd.v8double(double %sp, <8 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fadd_v16double(double %sp, <16 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fadd_v16double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX7-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[18:19]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[20:21]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[22:23]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[24:25]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[26:27]
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[28:29]
+; GFX7-NEXT: s_waitcnt vmcnt(2)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[30:31]
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fadd_v16double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX8-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[18:19]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[20:21]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[22:23]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[24:25]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[26:27]
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[28:29]
+; GFX8-NEXT: s_waitcnt vmcnt(2)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[30:31]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fadd_v16double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: scratch_load_dword v32, off, s32 offset:4
+; GFX9-NEXT: scratch_load_dword v33, off, s32 offset:8
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[18:19]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[20:21]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[22:23]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[24:25]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[26:27]
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[28:29]
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[30:31]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], v[32:33]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fadd_v16double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_clause 0x1
+; GFX10-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX10-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[18:19]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[20:21]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[22:23]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[24:25]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[26:27]
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[28:29]
+; GFX10-NEXT: s_waitcnt vmcnt(2)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[30:31]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fadd_v16double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v2, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v3, off, s32 offset:8
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[10:11]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[12:13]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[14:15]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[16:17]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[18:19]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[20:21]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[22:23]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[24:25]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[26:27]
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[28:29]
+; GFX11-NEXT: s_waitcnt vmcnt(2)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[30:31]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fadd_v16double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: scratch_load_b32 v2, off, s32 offset:4
+; GFX12-NEXT: scratch_load_b32 v3, off, s32 offset:8
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[10:11]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[12:13]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[14:15]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[16:17]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[18:19]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[20:21]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[22:23]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[24:25]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[26:27]
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[28:29]
+; GFX12-NEXT: s_wait_loadcnt 0x2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[30:31]
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fadd.v16double(double %sp, <16 x double> %v)
+ ret double %res
+}
+
+declare half @llvm.vector.reduce.fadd.v2half(half, <2 x half>)
+declare half @llvm.vector.reduce.fadd.v3half(half, <3 x half>)
+declare half @llvm.vector.reduce.fadd.v4half(half, <4 x half>)
+declare half @llvm.vector.reduce.fadd.v8half(half, <8 x half>)
+declare half @llvm.vector.reduce.fadd.v16half(half, <16 x half>)
+declare float @llvm.vector.reduce.fadd.v2float(float, <2 x float>)
+declare float @llvm.vector.reduce.fadd.v3float(float, <3 x float>)
+declare float @llvm.vector.reduce.fadd.v4float(float, <4 x float>)
+declare float @llvm.vector.reduce.fadd.v8float(float, <8 x float>)
+declare float @llvm.vector.reduce.fadd.v16float(float, <16 x float>)
+declare double @llvm.vector.reduce.fadd.v2double(double, <2 x double>)
+declare double @llvm.vector.reduce.fadd.v3double(double, <3 x double>)
+declare double @llvm.vector.reduce.fadd.v4double(double, <4 x double>)
+declare double @llvm.vector.reduce.fadd.v8double(double, <8 x double>)
+declare double @llvm.vector.reduce.fadd.v16double(double, <16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmax.ll
new file mode 100644
index 0000000000000..65bc2804c7510
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmax.ll
@@ -0,0 +1,1806 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define half @test_vector_reduce_fmax_v2half(<2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v1, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmax.v2half(<2 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmax_v3half(<3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmax.v3half(<3 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmax_v4half(<4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v1
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX9-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v1
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v3, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v2, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v3, v1
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmax.v4half(<4 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmax_v8half(<8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_max_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v0, v4, v0
+; GFX8-NEXT: v_max_f16_e32 v4, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v1, v4, v1
+; GFX8-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX8-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v2, v4, v2
+; GFX8-NEXT: v_max_f16_e32 v4, v3, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v3, v4, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v4, v0
+; GFX9-NEXT: v_max_f16_e32 v4, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v4, v1
+; GFX9-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX9-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v2, v4, v2
+; GFX9-NEXT: v_max_f16_e32 v4, v3, v3
+; GFX9-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v3, v4, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v5, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX10-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v7, v3, v3
+; GFX10-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v4, v0
+; GFX10-NEXT: v_max_f16_e32 v1, v5, v1
+; GFX10-NEXT: v_max_f16_e32 v2, v6, v2
+; GFX10-NEXT: v_max_f16_e32 v3, v7, v3
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v4
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v5
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v7
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v4
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v5
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v7
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmax.v8half(<8 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmax_v16half(<16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v10
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v11
+; GFX7-NEXT: v_max_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v9
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v15
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v4, v4, v5
+; GFX7-NEXT: v_max_f32_e32 v5, v6, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v13
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_max_f32_e32 v6, v6, v7
+; GFX7-NEXT: v_max_f32_e32 v7, v8, v9
+; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_max_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v0, v8, v0
+; GFX8-NEXT: v_max_f16_e32 v8, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v1, v8, v1
+; GFX8-NEXT: v_max_f16_e32 v8, v2, v2
+; GFX8-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v2, v8, v2
+; GFX8-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v3, v8, v3
+; GFX8-NEXT: v_max_f16_e32 v8, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v4, v8, v4
+; GFX8-NEXT: v_max_f16_e32 v8, v5, v5
+; GFX8-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v8, v5
+; GFX8-NEXT: v_max_f16_e32 v8, v6, v6
+; GFX8-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v6, v8, v6
+; GFX8-NEXT: v_max_f16_e32 v8, v7, v7
+; GFX8-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v7, v8, v7
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX8-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v0, v8, v0
+; GFX9-NEXT: v_max_f16_e32 v8, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v1, v8, v1
+; GFX9-NEXT: v_max_f16_e32 v8, v2, v2
+; GFX9-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v2, v8, v2
+; GFX9-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX9-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v3, v8, v3
+; GFX9-NEXT: v_max_f16_e32 v8, v4, v4
+; GFX9-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v4, v8, v4
+; GFX9-NEXT: v_max_f16_e32 v8, v5, v5
+; GFX9-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v5, v8, v5
+; GFX9-NEXT: v_max_f16_e32 v8, v6, v6
+; GFX9-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v6, v8, v6
+; GFX9-NEXT: v_max_f16_e32 v8, v7, v7
+; GFX9-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_max_f16_e32 v7, v8, v7
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v9, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v10, v2, v2
+; GFX10-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v0, v8, v0
+; GFX10-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v1, v9, v1
+; GFX10-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v2, v10, v2
+; GFX10-NEXT: v_max_f16_e32 v9, v4, v4
+; GFX10-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v10, v5, v5
+; GFX10-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v11, v6, v6
+; GFX10-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v12, v7, v7
+; GFX10-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v3, v8, v3
+; GFX10-NEXT: v_max_f16_e32 v4, v9, v4
+; GFX10-NEXT: v_max_f16_e32 v5, v10, v5
+; GFX10-NEXT: v_max_f16_e32 v6, v11, v6
+; GFX10-NEXT: v_max_f16_e32 v7, v12, v7
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX10-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v9, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v10, v10, v10
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v8
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v9
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v10
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v8, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v9, v12, v12
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v10, v13, v13
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v11, v14, v14
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v12, v15, v15
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v8
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v9
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v10
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v11
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v12
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v9, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v10, v10, v10
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v8
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v9
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v10
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v8, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v9, v12, v12
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v10, v13, v13
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v12, v15, v15
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v8
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v9
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v10
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v11
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v12
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_max_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_max_num_f16_e32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_num_f16_e32 v3, v6, v7
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmax.v16half(<16 x half> %v)
+ ret half %res
+}
+
+; fmax reduction of <2 x float>: single v_max after each lane is first folded
+; with itself (v_max x,x on GFX9+; v_mul 1.0,x on GFX7/8) — presumably input
+; canonicalization (NaN quieting / denorm handling); TODO confirm vs legalizer.
+; NOTE(review): intrinsic suffix ".v2float" is non-canonical mangling (usually
+; ".v2f32"); AutoUpgrade remangles so CHECK labels are unaffected — confirm intended.
+define float @test_vector_reduce_fmax_v2float(<2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmax.v2float(<2 x float> %v)
+ ret float %res
+}
+
+; fmax reduction of <3 x float>: odd element count, so the tree degenerates to a
+; left-to-right chain ((v0,v1),v2). Inputs are pre-folded with themselves first —
+; presumably canonicalization; TODO confirm.
+define float @test_vector_reduce_fmax_v3float(<3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v2
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v2
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmax.v3float(<3 x float> %v)
+ ret float %res
+}
+
+; fmax reduction of <4 x float>: balanced pairwise tree (v0,v1)+(v2,v3) then one
+; final max; 3 maxes total after the per-lane self-fold (presumably
+; canonicalization; TODO confirm).
+define float @test_vector_reduce_fmax_v4float(<4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v3
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmax.v4float(<4 x float> %v)
+ ret float %res
+}
+
+; fmax reduction of <8 x float>: 3-level pairwise tree (7 maxes after the
+; per-lane self-fold). GFX11/12 pack pairs into dual-issue v_dual_max ops.
+define float @test_vector_reduce_fmax_v8float(<8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX7-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX8-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v4
+; GFX9-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX9-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f32_e32 v6, v6, v6
+; GFX10-NEXT: v_max_f32_e32 v7, v7, v7
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: v_dual_max_f32 v4, v4, v4 :: v_dual_max_f32 v5, v5, v5
+; GFX11-NEXT: v_dual_max_f32 v6, v6, v6 :: v_dual_max_f32 v7, v7, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GFX11-NEXT: v_dual_max_f32 v2, v4, v5 :: v_dual_max_f32 v3, v6, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v5, v5, v5
+; GFX12-NEXT: v_dual_max_num_f32 v6, v6, v6 :: v_dual_max_num_f32 v7, v7, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v3
+; GFX12-NEXT: v_dual_max_num_f32 v2, v4, v5 :: v_dual_max_num_f32 v3, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v3
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmax.v8float(<8 x float> %v)
+ ret float %res
+}
+
+; fmax reduction of <16 x float>: 4-level pairwise tree (15 maxes after the
+; per-lane self-fold). On GFX10+ the scheduler interleaves canonicalization and
+; tree levels, so register assignments differ from the GFX9 ordering.
+define float @test_vector_reduce_fmax_v16float(<16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX7-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v8
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v9
+; GFX7-NEXT: v_max_f32_e32 v4, v4, v5
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v10
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v11
+; GFX7-NEXT: v_max_f32_e32 v5, v5, v6
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v12
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v13
+; GFX7-NEXT: v_max_f32_e32 v6, v6, v7
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v14
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v15
+; GFX7-NEXT: v_max_f32_e32 v7, v7, v8
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX8-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v8
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v9
+; GFX8-NEXT: v_max_f32_e32 v4, v4, v5
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v10
+; GFX8-NEXT: v_mul_f32_e32 v6, 1.0, v11
+; GFX8-NEXT: v_max_f32_e32 v5, v5, v6
+; GFX8-NEXT: v_mul_f32_e32 v6, 1.0, v12
+; GFX8-NEXT: v_mul_f32_e32 v7, 1.0, v13
+; GFX8-NEXT: v_max_f32_e32 v6, v6, v7
+; GFX8-NEXT: v_mul_f32_e32 v7, 1.0, v14
+; GFX8-NEXT: v_mul_f32_e32 v8, 1.0, v15
+; GFX8-NEXT: v_max_f32_e32 v7, v7, v8
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v4
+; GFX9-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX9-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX9-NEXT: v_max_f32_e32 v4, v8, v8
+; GFX9-NEXT: v_max_f32_e32 v5, v9, v9
+; GFX9-NEXT: v_max_f32_e32 v4, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v5, v10, v10
+; GFX9-NEXT: v_max_f32_e32 v6, v11, v11
+; GFX9-NEXT: v_max_f32_e32 v5, v5, v6
+; GFX9-NEXT: v_max_f32_e32 v6, v12, v12
+; GFX9-NEXT: v_max_f32_e32 v7, v13, v13
+; GFX9-NEXT: v_max_f32_e32 v6, v6, v7
+; GFX9-NEXT: v_max_f32_e32 v7, v14, v14
+; GFX9-NEXT: v_max_f32_e32 v8, v15, v15
+; GFX9-NEXT: v_max_f32_e32 v7, v7, v8
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX10-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX10-NEXT: v_max_f32_e32 v5, v8, v8
+; GFX10-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX10-NEXT: v_max_f32_e32 v7, v10, v10
+; GFX10-NEXT: v_max_f32_e32 v8, v11, v11
+; GFX10-NEXT: v_max_f32_e32 v9, v12, v12
+; GFX10-NEXT: v_max_f32_e32 v10, v13, v13
+; GFX10-NEXT: v_max_f32_e32 v11, v14, v14
+; GFX10-NEXT: v_max_f32_e32 v12, v15, v15
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v4
+; GFX10-NEXT: v_max_f32_e32 v4, v5, v6
+; GFX10-NEXT: v_max_f32_e32 v5, v7, v8
+; GFX10-NEXT: v_max_f32_e32 v6, v9, v10
+; GFX10-NEXT: v_max_f32_e32 v7, v11, v12
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: v_dual_max_f32 v4, v4, v4 :: v_dual_max_f32 v5, v5, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v3
+; GFX11-NEXT: v_dual_max_f32 v3, v6, v6 :: v_dual_max_f32 v2, v4, v5
+; GFX11-NEXT: v_dual_max_f32 v4, v7, v7 :: v_dual_max_f32 v5, v8, v8
+; GFX11-NEXT: v_dual_max_f32 v6, v9, v9 :: v_dual_max_f32 v7, v10, v10
+; GFX11-NEXT: v_dual_max_f32 v8, v11, v11 :: v_dual_max_f32 v9, v12, v12
+; GFX11-NEXT: v_dual_max_f32 v10, v13, v13 :: v_dual_max_f32 v11, v14, v14
+; GFX11-NEXT: v_max_f32_e32 v12, v15, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_max_f32 v3, v3, v4 :: v_dual_max_f32 v4, v5, v6
+; GFX11-NEXT: v_dual_max_f32 v5, v7, v8 :: v_dual_max_f32 v6, v9, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_max_f32 v7, v11, v12 :: v_dual_max_f32 v0, v0, v1
+; GFX11-NEXT: v_dual_max_f32 v1, v2, v3 :: v_dual_max_f32 v2, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v3, v6, v7 :: v_dual_max_f32 v0, v0, v1
+; GFX11-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v5, v5, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v3
+; GFX12-NEXT: v_dual_max_num_f32 v3, v6, v6 :: v_dual_max_num_f32 v2, v4, v5
+; GFX12-NEXT: v_dual_max_num_f32 v4, v7, v7 :: v_dual_max_num_f32 v5, v8, v8
+; GFX12-NEXT: v_dual_max_num_f32 v6, v9, v9 :: v_dual_max_num_f32 v7, v10, v10
+; GFX12-NEXT: v_dual_max_num_f32 v8, v11, v11 :: v_dual_max_num_f32 v9, v12, v12
+; GFX12-NEXT: v_dual_max_num_f32 v10, v13, v13 :: v_dual_max_num_f32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f32_e32 v12, v15, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_dual_max_num_f32 v3, v3, v4 :: v_dual_max_num_f32 v4, v5, v6
+; GFX12-NEXT: v_dual_max_num_f32 v5, v7, v8 :: v_dual_max_num_f32 v6, v9, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_max_num_f32 v7, v11, v12 :: v_dual_max_num_f32 v0, v0, v1
+; GFX12-NEXT: v_dual_max_num_f32 v1, v2, v3 :: v_dual_max_num_f32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_max_num_f32 v3, v6, v7 :: v_dual_max_num_f32 v0, v0, v1
+; GFX12-NEXT: v_max_num_f32_e32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmax.v16float(<16 x float> %v)
+ ret float %res
+}
+
+
+; fmax reduction of <2 x double>: all targets use v_max_f64 on register pairs,
+; including the v_max x,x self-fold on each input (GFX7/8 too — no 1.0-multiply
+; trick here, unlike the f32 cases). GFX12 renames the op to v_max_num_f64.
+; NOTE(review): intrinsic suffix ".v2double" is non-canonical mangling (usually
+; ".v2f64"); AutoUpgrade remangles — confirm intended.
+define double @test_vector_reduce_fmax_v2double(<2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmax.v2double(<2 x double> %v)
+ ret double %res
+}
+
+; fmax reduction of <3 x double>: odd count → chained ((v0,v1),v2) over register
+; pairs; GFX9 interleaves the self-folds with the chain, other targets hoist
+; them all first.
+define double @test_vector_reduce_fmax_v3double(<3 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v3double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v3double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v3double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v3double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v3double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v3double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmax.v3double(<3 x double> %v)
+ ret double %res
+}
+
+; fmax reduction of <4 x double>: balanced pairwise tree over register pairs
+; (3 v_max_f64 after the per-pair self-folds).
+define double @test_vector_reduce_fmax_v4double(<4 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v4double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v4double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v4double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v4double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v4double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v4double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmax.v4double(<4 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmax_v8double(<8 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v8double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX7-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX7-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX7-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v8double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX8-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX8-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX8-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v8double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT: v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
+; GFX9-NEXT: v_max_f64 v[4:5], v[8:9], v[8:9]
+; GFX9-NEXT: v_max_f64 v[6:7], v[10:11], v[10:11]
+; GFX9-NEXT: v_max_f64 v[4:5], v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[6:7], v[12:13], v[12:13]
+; GFX9-NEXT: v_max_f64 v[8:9], v[14:15], v[14:15]
+; GFX9-NEXT: v_max_f64 v[6:7], v[6:7], v[8:9]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v8double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX10-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX10-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX10-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v8double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX11-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX11-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX11-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v8double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[8:9], v[8:9], v[8:9]
+; GFX12-NEXT: v_max_num_f64_e32 v[10:11], v[10:11], v[10:11]
+; GFX12-NEXT: v_max_num_f64_e32 v[12:13], v[12:13], v[12:13]
+; GFX12-NEXT: v_max_num_f64_e32 v[14:15], v[14:15], v[14:15]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmax.v8double(<8 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmax_v16double(<16 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmax_v16double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX7-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX7-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX7-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX7-NEXT: v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX7-NEXT: v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX7-NEXT: v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX7-NEXT: v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX7-NEXT: v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX7-NEXT: v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX7-NEXT: v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT: v_max_f64 v[8:9], v[16:17], v[18:19]
+; GFX7-NEXT: v_max_f64 v[10:11], v[20:21], v[22:23]
+; GFX7-NEXT: v_max_f64 v[12:13], v[24:25], v[26:27]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX7-NEXT: v_max_f64 v[14:15], v[28:29], v[30:31]
+; GFX7-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmax_v16double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX8-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX8-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX8-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX8-NEXT: v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX8-NEXT: v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX8-NEXT: v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX8-NEXT: v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX8-NEXT: v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX8-NEXT: v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX8-NEXT: v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT: v_max_f64 v[8:9], v[16:17], v[18:19]
+; GFX8-NEXT: v_max_f64 v[10:11], v[20:21], v[22:23]
+; GFX8-NEXT: v_max_f64 v[12:13], v[24:25], v[26:27]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX8-NEXT: v_max_f64 v[14:15], v[28:29], v[30:31]
+; GFX8-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmax_v16double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX9-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX9-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX9-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX9-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX9-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX9-NEXT: v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX9-NEXT: v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX9-NEXT: v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX9-NEXT: v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX9-NEXT: v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX9-NEXT: v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[8:9], v[16:17], v[18:19]
+; GFX9-NEXT: v_max_f64 v[10:11], v[20:21], v[22:23]
+; GFX9-NEXT: v_max_f64 v[12:13], v[24:25], v[26:27]
+; GFX9-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[2:3], v[30:31], v[30:31]
+; GFX9-NEXT: v_max_f64 v[2:3], v[28:29], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[12:13], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmax_v16double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX10-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX10-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX10-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX10-NEXT: v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX10-NEXT: v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX10-NEXT: v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX10-NEXT: v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX10-NEXT: v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX10-NEXT: v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX10-NEXT: v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT: v_max_f64 v[8:9], v[16:17], v[18:19]
+; GFX10-NEXT: v_max_f64 v[10:11], v[20:21], v[22:23]
+; GFX10-NEXT: v_max_f64 v[12:13], v[24:25], v[26:27]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX10-NEXT: v_max_f64 v[14:15], v[28:29], v[30:31]
+; GFX10-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmax_v16double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT: v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT: v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX11-NEXT: v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX11-NEXT: v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX11-NEXT: v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX11-NEXT: v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX11-NEXT: v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX11-NEXT: v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX11-NEXT: v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX11-NEXT: v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX11-NEXT: v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX11-NEXT: v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT: v_max_f64 v[8:9], v[16:17], v[18:19]
+; GFX11-NEXT: v_max_f64 v[10:11], v[20:21], v[22:23]
+; GFX11-NEXT: v_max_f64 v[12:13], v[24:25], v[26:27]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[14:15], v[28:29], v[30:31]
+; GFX11-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmax_v16double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[8:9], v[8:9], v[8:9]
+; GFX12-NEXT: v_max_num_f64_e32 v[10:11], v[10:11], v[10:11]
+; GFX12-NEXT: v_max_num_f64_e32 v[12:13], v[12:13], v[12:13]
+; GFX12-NEXT: v_max_num_f64_e32 v[14:15], v[14:15], v[14:15]
+; GFX12-NEXT: v_max_num_f64_e32 v[16:17], v[16:17], v[16:17]
+; GFX12-NEXT: v_max_num_f64_e32 v[18:19], v[18:19], v[18:19]
+; GFX12-NEXT: v_max_num_f64_e32 v[20:21], v[20:21], v[20:21]
+; GFX12-NEXT: v_max_num_f64_e32 v[22:23], v[22:23], v[22:23]
+; GFX12-NEXT: v_max_num_f64_e32 v[24:25], v[24:25], v[24:25]
+; GFX12-NEXT: v_max_num_f64_e32 v[26:27], v[26:27], v[26:27]
+; GFX12-NEXT: v_max_num_f64_e32 v[28:29], v[28:29], v[28:29]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_max_num_f64_e32 v[8:9], v[16:17], v[18:19]
+; GFX12-NEXT: v_max_num_f64_e32 v[10:11], v[20:21], v[22:23]
+; GFX12-NEXT: v_max_num_f64_e32 v[12:13], v[24:25], v[26:27]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[30:31], v[30:31], v[30:31]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[14:15], v[28:29], v[30:31]
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmax.v16double(<16 x double> %v)
+ ret double %res
+}
+
+declare half @llvm.vector.reduce.fmax.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fmax.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmax.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmax.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmax.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmax.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmax.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmax.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmax.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmax.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmax.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmax.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmax.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmax.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmax.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmaximum.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmaximum.ll
new file mode 100644
index 0000000000000..9c2c77c88345d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmaximum.ll
@@ -0,0 +1,2671 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define half @test_vector_reduce_fmaximum_v2half(<2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v1
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, 0
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, 0x7e00, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmaximum.v2half(<2 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmaximum_v3half(<3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v0
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, 0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v1, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v1, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_max_f16_e32 v3, v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, s4
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v3, v0, v2
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, 0x7e00, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmaximum.v3half(<3 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmaximum_v4half(<4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v1
+; GFX7-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v2
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, 0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX8-NEXT: v_max_f16_e32 v4, v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v1, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v5, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v3, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0x7e00, s4
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f16_e32 v4, v0, v2
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_max_f16_e32 v5, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmaximum.v4half(<4 x half> %v)
+ ret half %res
+}
+
+; Reduction of @llvm.vector.reduce.fmaximum over <8 x half>: a pairwise tree
+; with three max levels (8 -> 4 -> 2 -> 1 elements; the 8 halves arrive packed
+; two-per-VGPR in v0-v3, so the first level maxes each register's low half
+; against its high half extracted via lshrrev/SDWA WORD_1).
+; fmaximum must propagate NaN, so on targets without a native instruction
+; (GFX8-GFX11 here) every v_max_f16 is paired with an unordered compare
+; (v_cmp_u_f16) plus a v_cndmask that substitutes 0x7e00 (f16 quiet NaN) when
+; either input is NaN; GFX12 emits the native v_maximum_f16 and needs no fixup.
+; GFX7 has no f16 max at all, so comparisons go through v_cvt_f32_f16; the
+; additional cmp_eq_u32-against-0 selects on the 0xffff-masked inputs appear to
+; implement fmaximum's +0.0 > -0.0 ordering (NOTE(review): confirm against the
+; legalizer's G_FMAXIMUM lowering).
+define half @test_vector_reduce_fmaximum_v8half(<8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v11, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v2
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, 0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v14, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v12
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v13, v14
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v13, v14
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v5
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v8, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v4, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v9, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v6, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v9, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v6, vcc
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v7, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v7, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_max_f16_e32 v8, v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v9, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v4, v1, v5
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v4, v2, v6
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v4, v3, v7
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v3, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_sdwa v4, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_max_f16_sdwa v4, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX9-NEXT: v_max_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
+; GFX9-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
+; GFX9-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_sdwa v4, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v6, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v7, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, 0x7e00, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX10-NEXT: v_max_f16_e32 v5, v2, v3
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v8, v0, v4
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v4
+; GFX11-NEXT: v_max_f16_e32 v9, v1, v5
+; GFX11-NEXT: v_max_f16_e32 v10, v2, v6
+; GFX11-NEXT: v_max_f16_e32 v11, v3, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v3, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_max_f16_e32 v5, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v4
+; GFX12-NEXT: v_maximum_f16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_maximum_f16 v2, v2, v6
+; GFX12-NEXT: v_maximum_f16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: v_maximum_f16 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call half @llvm.vector.reduce.fmaximum.v8half(<8 x half> %v)
+  ret half %res
+}
+
+define half @test_vector_reduce_fmaximum_v16half(<16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v1
+; GFX7-NEXT: v_mov_b32_e32 v16, 0x7e00
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v19, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v17, v19, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v19, v17, v0, vcc
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v17
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, 0
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v19, v1, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v2
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v18, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v17, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v16, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v17, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v5
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v18, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v17, v2, vcc
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff, v4
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v3, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v17, v5, v4, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v17, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v17, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v7, v6, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v9
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v17
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v4, v6, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v18, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v9, v8, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v18, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v5, v8, vcc
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v9
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v6, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v11, v10, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v6, v10, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v13
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v11
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v7, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v10, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v13, v12, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v7, v12, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v15
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v13
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v8, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v15, v14, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v8
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v8, v14, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v10, v15, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v9
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v4
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v10, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v2
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v8, v7, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v4, v7, vcc
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v4
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v6, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v2
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v2
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX7-NEXT: v_cmp_gt_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_max_f16_e32 v16, v0, v8
+; GFX8-NEXT: v_mov_b32_e32 v17, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v1, v9
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v2, v10
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v10
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v3, v11
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v3, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v4, v12
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v4, v12
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v5, v13
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v5, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v6, v14
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v6, v14
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v7, v15
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v7, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v8, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_sdwa v8, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v4, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v5, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v6, v6 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v7, v7 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v8, v9, vcc
+; GFX9-NEXT: v_max_f16_e32 v8, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: v_max_f16_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f16_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v4, v5
+; GFX9-NEXT: v_max_f16_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_sdwa v8, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v8, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v4, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v5, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v8, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v6, v6 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_sdwa v9, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v8, 0x7e00, s4
+; GFX10-NEXT: v_max_f16_e32 v8, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v7, v7 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v9, 0x7e00, s4
+; GFX10-NEXT: v_max_f16_e32 v9, v2, v3
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_max_f16_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v9, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_max_f16_e32 v4, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_max_f16_e32 v5, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-NEXT: v_max_f16_e32 v14, v0, v8
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v8
+; GFX11-NEXT: v_max_f16_e32 v17, v1, v9
+; GFX11-NEXT: v_max_f16_e32 v8, v2, v10
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v6
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v14, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v9
+; GFX11-NEXT: v_max_f16_e32 v9, v3, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v17, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v10
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v3, v11
+; GFX11-NEXT: v_max_f16_e32 v8, v4, v12
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v12
+; GFX11-NEXT: v_max_f16_e32 v9, v5, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v5, v13
+; GFX11-NEXT: v_max_f16_e32 v8, v6, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v15
+; GFX11-NEXT: v_max_f16_e32 v9, v7, v16
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v7, v16
+; GFX11-NEXT: v_max_f16_e32 v8, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_max_f16_e32 v9, v2, v3
+; GFX11-NEXT: v_max_f16_e32 v1, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_max_f16_e32 v8, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f16_e32 v4, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_max_f16_e32 v5, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX12-NEXT: v_maximum_f16 v0, v0, v8
+; GFX12-NEXT: v_maximum_f16 v1, v1, v9
+; GFX12-NEXT: v_maximum_f16 v2, v2, v10
+; GFX12-NEXT: v_maximum_f16 v3, v3, v11
+; GFX12-NEXT: v_maximum_f16 v4, v4, v12
+; GFX12-NEXT: v_maximum_f16 v5, v5, v13
+; GFX12-NEXT: v_maximum_f16 v6, v6, v14
+; GFX12-NEXT: v_maximum_f16 v7, v7, v15
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: v_maximum_f16 v1, v2, v3
+; GFX12-NEXT: v_maximum_f16 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_maximum_f16 v3, v6, v7
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f16 v1, v2, v3
+; GFX12-NEXT: v_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmaximum.v16half(<16 x half> %v)
+ ret half %res
+}
+
+define float @test_vector_reduce_fmaximum_v2float(<2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmaximum.v2float(<2 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fmaximum_v3float(<3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v3, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmaximum.v3float(<3 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fmaximum_v4float(<4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v5, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v4, v0, v1 :: v_dual_max_f32 v5, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmaximum.v4float(<4 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fmaximum_v8float(<8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v9, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v9, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_max_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v8, v0, v1 :: v_dual_max_f32 v9, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_max_f32_e32 v1, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v8, v6, v7
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v9, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v4, v0, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f32_e32 v5, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: v_maximum_f32 v2, v4, v5
+; GFX12-NEXT: v_maximum_f32 v3, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmaximum.v8float(<8 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fmaximum_v16float(<16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f32_e32 v16, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v4, v8, v9
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v5, v10, v11
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v6, v12, v13
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v7, v14, v15
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f32_e32 v16, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v4, v8, v9
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v5, v10, v11
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v6, v12, v13
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v7, v14, v15
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v16, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v4, v8, v9
+; GFX9-NEXT: v_max_f32_e32 v5, v10, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: v_max_f32_e32 v6, v12, v13
+; GFX9-NEXT: v_max_f32_e32 v7, v14, v15
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX9-NEXT: v_max_f32_e32 v8, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v16, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v17, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v16, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v17, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v8, v9
+; GFX10-NEXT: v_max_f32_e32 v5, v10, v11
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_max_f32_e32 v6, v12, v13
+; GFX10-NEXT: v_max_f32_e32 v7, v14, v15
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v9
+; GFX10-NEXT: v_max_f32_e32 v8, v0, v2
+; GFX10-NEXT: v_max_f32_e32 v9, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v11
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v13
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v15
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_max_f32_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v5, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v16, v0, v1 :: v_dual_max_f32 v17, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_max_f32_e32 v1, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v16, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_max_f32_e32 v3, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v17, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_dual_max_f32 v4, v8, v9 :: v_dual_max_f32 v5, v10, v11
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_dual_max_f32 v6, v12, v13 :: v_dual_max_f32 v7, v14, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v9
+; GFX11-NEXT: v_max_f32_e32 v8, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v11
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v13
+; GFX11-NEXT: v_max_f32_e32 v9, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_max_f32_e32 v2, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX11-NEXT: v_max_f32_e32 v8, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v9, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v4, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f32_e32 v5, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: v_maximum_f32 v2, v4, v5
+; GFX12-NEXT: v_maximum_f32 v3, v6, v7
+; GFX12-NEXT: v_maximum_f32 v4, v8, v9
+; GFX12-NEXT: v_maximum_f32 v5, v10, v11
+; GFX12-NEXT: v_maximum_f32 v6, v12, v13
+; GFX12-NEXT: v_maximum_f32 v7, v14, v15
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: v_maximum_f32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_maximum_f32 v3, v6, v7
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f32 v1, v2, v3
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmaximum.v16float(<16 x float> %v)
+ ret float %res
+}
+
+
+define double @test_vector_reduce_fmaximum_v2double(<2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmaximum.v2double(<2 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmaximum_v3double(<3 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v7, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v7, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v3double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmaximum.v3double(<3 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmaximum_v4double(<4 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v9, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v6, s[4:5]
+; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v6, s[4:5]
+; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v10, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v4double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmaximum.v4double(<4 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmaximum_v8double(<8 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[16:17], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[8:9], v[10:11]
+; GFX7-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[12:13], v[14:15]
+; GFX7-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v18, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v18, s[8:9]
+; GFX7-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[0:1]
+; GFX7-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v9, v18, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[16:17], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[8:9], v[10:11]
+; GFX8-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[12:13], v[14:15]
+; GFX8-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v18, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v18, s[8:9]
+; GFX8-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[0:1]
+; GFX8-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v18, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[16:17], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT: v_max_f64 v[6:7], v[12:13], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_max_f64 v[8:9], v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[16:17], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[8:9], v[10:11]
+; GFX10-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[12:13], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX10-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[16:17], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[8:9], v[10:11]
+; GFX11-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[12:13], v[14:15]
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v8double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_maximum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_maximum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmaximum.v8double(<8 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmaximum_v16double(<16 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_max_f64 v[32:33], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX7-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_max_f64 v[12:13], v[8:9], v[10:11]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[8:9], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v34, 0x7ff80000
+; GFX7-NEXT: v_max_f64 v[6:7], v[16:17], v[18:19]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[16:17], v[18:19]
+; GFX7-NEXT: v_max_f64 v[8:9], v[20:21], v[22:23]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[10:11], v[20:21], v[22:23]
+; GFX7-NEXT: v_max_f64 v[10:11], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[12:13], v[0:1], v[2:3]
+; GFX7-NEXT: v_max_f64 v[0:1], v[24:25], v[26:27]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[14:15], v[24:25], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, v34, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v12, v32, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v33, v34, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v34, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v7, v7, v34, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v8, v8, 0, s[10:11]
+; GFX7-NEXT: v_cndmask_b32_e64 v9, v9, v34, s[10:11]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v10, 0, s[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v11, v34, s[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[14:15]
+; GFX7-NEXT: v_max_f64 v[18:19], v[2:3], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[6:7], v[8:9]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[6:7], v[8:9]
+; GFX7-NEXT: v_max_f64 v[14:15], v[10:11], v[12:13]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[10:11], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v34, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v14, 0, s[6:7]
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_max_f64 v[16:17], v[28:29], v[30:31]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[28:29], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v16, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v17, v34, vcc
+; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[5:6]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[5:6]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v15, v34, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v18, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v19, v34, s[4:5]
+; GFX7-NEXT: v_max_f64 v[9:10], v[4:5], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v7, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v8, v34, vcc
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX7-NEXT: v_max_f64 v[0:1], v[2:3], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v9, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v10, v34, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[4:5]
+; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v34, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_max_f64 v[32:33], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_max_f64 v[12:13], v[8:9], v[10:11]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[8:9], v[10:11]
+; GFX8-NEXT: v_mov_b32_e32 v34, 0x7ff80000
+; GFX8-NEXT: v_max_f64 v[6:7], v[16:17], v[18:19]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[16:17], v[18:19]
+; GFX8-NEXT: v_max_f64 v[8:9], v[20:21], v[22:23]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[10:11], v[20:21], v[22:23]
+; GFX8-NEXT: v_max_f64 v[10:11], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[12:13], v[0:1], v[2:3]
+; GFX8-NEXT: v_max_f64 v[0:1], v[24:25], v[26:27]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[14:15], v[24:25], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, v34, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v12, v32, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v33, v34, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v34, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v34, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, 0, s[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v9, v34, s[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v10, 0, s[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v34, s[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[14:15]
+; GFX8-NEXT: v_max_f64 v[18:19], v[2:3], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[6:7], v[8:9]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[6:7], v[8:9]
+; GFX8-NEXT: v_max_f64 v[14:15], v[10:11], v[12:13]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[10:11], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v34, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v14, 0, s[6:7]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_max_f64 v[16:17], v[28:29], v[30:31]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[28:29], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v16, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v34, vcc
+; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[5:6]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[5:6]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v15, v34, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v18, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v19, v34, s[4:5]
+; GFX8-NEXT: v_max_f64 v[9:10], v[4:5], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v7, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v34, vcc
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_max_f64 v[0:1], v[2:3], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v9, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v10, v34, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[4:5]
+; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v34, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_mov_b32_e32 v54, 0x7ff80000
+; GFX9-NEXT: v_max_f64 v[32:33], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[34:35], v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[36:37], v[8:9], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v33, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: v_max_f64 v[38:39], v[12:13], v[14:15]
+; GFX9-NEXT: v_max_f64 v[48:49], v[16:17], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v34, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v35, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_max_f64 v[50:51], v[20:21], v[22:23]
+; GFX9-NEXT: v_max_f64 v[52:53], v[24:25], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v36, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v37, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[14:15]
+; GFX9-NEXT: v_max_f64 v[14:15], v[0:1], v[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_u_f64_e64 s[0:1], v[28:29], v[30:31]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v38, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v39, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[16:17], v[18:19]
+; GFX9-NEXT: v_max_f64 v[16:17], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v48, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v49, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[20:21], v[22:23]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v50, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v51, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[24:25], v[26:27]
+; GFX9-NEXT: v_max_f64 v[18:19], v[8:9], v[10:11]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v52, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v53, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v14, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v15, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v16, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v17, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v18, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v19, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_max_f64 v[2:3], v[28:29], v[30:31]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v54, s[0:1]
+; GFX9-NEXT: v_max_f64 v[8:9], v[12:13], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 s[0:1], v[12:13], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v54, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v8, 0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v9, v54, s[0:1]
+; GFX9-NEXT: v_max_f64 v[6:7], v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v54, vcc
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v54, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_max_f64 v[32:33], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[8:9], v[10:11]
+; GFX10-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[12:13], v[14:15]
+; GFX10-NEXT: v_max_f64 v[6:7], v[16:17], v[18:19]
+; GFX10-NEXT: v_cmp_u_f64_e64 s7, v[16:17], v[18:19]
+; GFX10-NEXT: v_max_f64 v[8:9], v[20:21], v[22:23]
+; GFX10-NEXT: v_cmp_u_f64_e64 s8, v[20:21], v[22:23]
+; GFX10-NEXT: v_max_f64 v[10:11], v[24:25], v[26:27]
+; GFX10-NEXT: v_cmp_u_f64_e64 s9, v[24:25], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v32, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v33, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, 0x7ff80000, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, 0, s8
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, 0x7ff80000, s8
+; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, 0, s9
+; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, 0x7ff80000, s9
+; GFX10-NEXT: v_max_f64 v[16:17], v[14:15], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[14:15], v[0:1]
+; GFX10-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_max_f64 v[2:3], v[6:7], v[8:9]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[6:7], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_f64 v[12:13], v[28:29], v[30:31]
+; GFX10-NEXT: v_cmp_u_f64_e64 s10, v[28:29], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, 0, s10
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, 0x7ff80000, s10
+; GFX10-NEXT: v_max_f64 v[4:5], v[10:11], v[12:13]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[10:11], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_max_f64 v[32:33], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: v_max_f64 v[2:3], v[8:9], v[10:11]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[8:9], v[10:11]
+; GFX11-NEXT: v_max_f64 v[4:5], v[12:13], v[14:15]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[12:13], v[14:15]
+; GFX11-NEXT: v_max_f64 v[6:7], v[16:17], v[18:19]
+; GFX11-NEXT: v_cmp_u_f64_e64 s3, v[16:17], v[18:19]
+; GFX11-NEXT: v_max_f64 v[8:9], v[20:21], v[22:23]
+; GFX11-NEXT: v_cmp_u_f64_e64 s4, v[20:21], v[22:23]
+; GFX11-NEXT: v_max_f64 v[10:11], v[24:25], v[26:27]
+; GFX11-NEXT: v_cmp_u_f64_e64 s5, v[24:25], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v32, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v33, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, 0, s3
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, 0x7ff80000, s3
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, 0, s4
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, 0x7ff80000, s4
+; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, 0, s5
+; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, 0x7ff80000, s5
+; GFX11-NEXT: v_max_f64 v[16:17], v[14:15], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[14:15], v[0:1]
+; GFX11-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_max_f64 v[2:3], v[6:7], v[8:9]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[6:7], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f64 v[8:9], v[6:7], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_f64 v[12:13], v[28:29], v[30:31]
+; GFX11-NEXT: v_cmp_u_f64_e64 s6, v[28:29], v[30:31]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, 0, s6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, 0x7ff80000, s6
+; GFX11-NEXT: v_max_f64 v[4:5], v[10:11], v[12:13]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[10:11], v[12:13]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmaximum_v16double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_maximum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_maximum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_maximum_f64 v[8:9], v[16:17], v[18:19]
+; GFX12-NEXT: v_maximum_f64 v[10:11], v[20:21], v[22:23]
+; GFX12-NEXT: v_maximum_f64 v[12:13], v[24:25], v[26:27]
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_maximum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[14:15], v[28:29], v[30:31]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmaximum.v16double(<16 x double> %v)
+ ret double %res
+}
+
+declare half @llvm.vector.reduce.fmaximum.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fmaximum.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmaximum.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmaximum.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmaximum.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmaximum.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmaximum.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmaximum.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmaximum.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmaximum.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmaximum.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmaximum.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmaximum.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmaximum.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmaximum.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmin.ll
new file mode 100644
index 0000000000000..63b1ebc90faeb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmin.ll
@@ -0,0 +1,1805 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define half @test_vector_reduce_fmin_v2half(<2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v1, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v1, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmin.v2half(<2 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmin_v3half(<3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX8-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX9-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX10-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmin.v3half(<3 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmin_v4half(<4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v1
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX9-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v1
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v2, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v3, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v2, v0
+; GFX10-NEXT: v_min_f16_e32 v1, v3, v1
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v2
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v2
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmin.v4half(<4 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmin_v8half(<8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_min_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v0, v4, v0
+; GFX8-NEXT: v_max_f16_e32 v4, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v1, v4, v1
+; GFX8-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX8-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v2, v4, v2
+; GFX8-NEXT: v_max_f16_e32 v4, v3, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v3, v4, v3
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v4, v0
+; GFX9-NEXT: v_max_f16_e32 v4, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v1, v4, v1
+; GFX9-NEXT: v_max_f16_e32 v4, v2, v2
+; GFX9-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v2, v4, v2
+; GFX9-NEXT: v_max_f16_e32 v4, v3, v3
+; GFX9-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v3, v4, v3
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v4, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v5, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v6, v2, v2
+; GFX10-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v7, v3, v3
+; GFX10-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v4, v0
+; GFX10-NEXT: v_min_f16_e32 v1, v5, v1
+; GFX10-NEXT: v_min_f16_e32 v2, v6, v2
+; GFX10-NEXT: v_min_f16_e32 v3, v7, v3
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v4
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v5
+; GFX11-NEXT: v_min_f16_e32 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f16_e32 v3, v3, v7
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v4
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v5
+; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v7
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmin.v8half(<8 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fmin_v16half(<16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v10
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v11
+; GFX7-NEXT: v_min_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v9
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v15
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v4, v4, v5
+; GFX7-NEXT: v_min_f32_e32 v5, v6, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v13
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_min_f32_e32 v6, v6, v7
+; GFX7-NEXT: v_min_f32_e32 v7, v8, v9
+; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v7
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
+; GFX7-NEXT: v_min_f32_e32 v3, v4, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX8-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v0, v8, v0
+; GFX8-NEXT: v_max_f16_e32 v8, v1, v1
+; GFX8-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v1, v8, v1
+; GFX8-NEXT: v_max_f16_e32 v8, v2, v2
+; GFX8-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v2, v8, v2
+; GFX8-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v3, v8, v3
+; GFX8-NEXT: v_max_f16_e32 v8, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v4, v8, v4
+; GFX8-NEXT: v_max_f16_e32 v8, v5, v5
+; GFX8-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v5, v8, v5
+; GFX8-NEXT: v_max_f16_e32 v8, v6, v6
+; GFX8-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v6, v8, v6
+; GFX8-NEXT: v_max_f16_e32 v8, v7, v7
+; GFX8-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_f16_e32 v7, v8, v7
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX8-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX9-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v0, v8, v0
+; GFX9-NEXT: v_max_f16_e32 v8, v1, v1
+; GFX9-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v1, v8, v1
+; GFX9-NEXT: v_max_f16_e32 v8, v2, v2
+; GFX9-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v2, v8, v2
+; GFX9-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX9-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v3, v8, v3
+; GFX9-NEXT: v_max_f16_e32 v8, v4, v4
+; GFX9-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v4, v8, v4
+; GFX9-NEXT: v_max_f16_e32 v8, v5, v5
+; GFX9-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v5, v8, v5
+; GFX9-NEXT: v_max_f16_e32 v8, v6, v6
+; GFX9-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v6, v8, v6
+; GFX9-NEXT: v_max_f16_e32 v8, v7, v7
+; GFX9-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_min_f16_e32 v7, v8, v7
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f16_e32 v8, v0, v0
+; GFX10-NEXT: v_max_f16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v9, v1, v1
+; GFX10-NEXT: v_max_f16_sdwa v1, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v10, v2, v2
+; GFX10-NEXT: v_max_f16_sdwa v2, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v0, v8, v0
+; GFX10-NEXT: v_max_f16_e32 v8, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v1, v9, v1
+; GFX10-NEXT: v_max_f16_sdwa v3, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v2, v10, v2
+; GFX10-NEXT: v_max_f16_e32 v9, v4, v4
+; GFX10-NEXT: v_max_f16_sdwa v4, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v10, v5, v5
+; GFX10-NEXT: v_max_f16_sdwa v5, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v11, v6, v6
+; GFX10-NEXT: v_max_f16_sdwa v6, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_max_f16_e32 v12, v7, v7
+; GFX10-NEXT: v_max_f16_sdwa v7, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v3, v8, v3
+; GFX10-NEXT: v_min_f16_e32 v4, v9, v4
+; GFX10-NEXT: v_min_f16_e32 v5, v10, v5
+; GFX10-NEXT: v_min_f16_e32 v6, v11, v6
+; GFX10-NEXT: v_min_f16_e32 v7, v12, v7
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX10-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GFX11-NEXT: v_max_f16_e32 v8, v8, v8
+; GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GFX11-NEXT: v_max_f16_e32 v9, v9, v9
+; GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GFX11-NEXT: v_max_f16_e32 v10, v10, v10
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v8
+; GFX11-NEXT: v_min_f16_e32 v1, v1, v9
+; GFX11-NEXT: v_min_f16_e32 v2, v2, v10
+; GFX11-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX11-NEXT: v_max_f16_e32 v8, v11, v11
+; GFX11-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-NEXT: v_max_f16_e32 v9, v12, v12
+; GFX11-NEXT: v_max_f16_e32 v5, v5, v5
+; GFX11-NEXT: v_max_f16_e32 v10, v13, v13
+; GFX11-NEXT: v_max_f16_e32 v6, v6, v6
+; GFX11-NEXT: v_max_f16_e32 v11, v14, v14
+; GFX11-NEXT: v_max_f16_e32 v7, v7, v7
+; GFX11-NEXT: v_max_f16_e32 v12, v15, v15
+; GFX11-NEXT: v_min_f16_e32 v3, v3, v8
+; GFX11-NEXT: v_min_f16_e32 v4, v4, v9
+; GFX11-NEXT: v_min_f16_e32 v5, v5, v10
+; GFX11-NEXT: v_min_f16_e32 v6, v6, v11
+; GFX11-NEXT: v_min_f16_e32 v7, v7, v12
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX11-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GFX12-NEXT: v_max_num_f16_e32 v8, v8, v8
+; GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GFX12-NEXT: v_max_num_f16_e32 v9, v9, v9
+; GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GFX12-NEXT: v_max_num_f16_e32 v10, v10, v10
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v8
+; GFX12-NEXT: v_min_num_f16_e32 v1, v1, v9
+; GFX12-NEXT: v_min_num_f16_e32 v2, v2, v10
+; GFX12-NEXT: v_max_num_f16_e32 v3, v3, v3
+; GFX12-NEXT: v_max_num_f16_e32 v8, v11, v11
+; GFX12-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-NEXT: v_max_num_f16_e32 v9, v12, v12
+; GFX12-NEXT: v_max_num_f16_e32 v5, v5, v5
+; GFX12-NEXT: v_max_num_f16_e32 v10, v13, v13
+; GFX12-NEXT: v_max_num_f16_e32 v6, v6, v6
+; GFX12-NEXT: v_max_num_f16_e32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f16_e32 v7, v7, v7
+; GFX12-NEXT: v_max_num_f16_e32 v12, v15, v15
+; GFX12-NEXT: v_min_num_f16_e32 v3, v3, v8
+; GFX12-NEXT: v_min_num_f16_e32 v4, v4, v9
+; GFX12-NEXT: v_min_num_f16_e32 v5, v5, v10
+; GFX12-NEXT: v_min_num_f16_e32 v6, v6, v11
+; GFX12-NEXT: v_min_num_f16_e32 v7, v7, v12
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_min_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_min_num_f16_e32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_min_num_f16_e32 v3, v6, v7
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f16_e32 v1, v2, v3
+; GFX12-NEXT: v_min_num_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmin.v16half(<16 x half> %v)
+ ret half %res
+}
+
+define float @test_vector_reduce_fmin_v2float(<2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmin.v2float(<2 x float> %v)
+ ret float %res
+}
+
+; Reduce <3 x float>: odd element count, so the reduction is sequential
+; rather than a balanced tree — min(min(v0, v1), v2). Lanes are self-combined
+; first (v_mul 1.0 on GFX7/8, self-max on GFX9+), presumably NaN
+; canonicalization — confirm against AMDGPULegalizerInfo.
+define float @test_vector_reduce_fmin_v3float(<3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_max_f32 v1, v2, v2
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v1 :: v_dual_max_num_f32 v1, v2, v2
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call float @llvm.vector.reduce.fmin.v3float(<3 x float> %v)
+  ret float %res
+}
+
+; Reduce <4 x float> as a balanced tree: min(min(v0,v1), min(v2,v3)),
+; after per-lane self-combination (v_mul 1.0 on GFX7/8, self-max on GFX9+;
+; presumably NaN canonicalization — confirm against AMDGPULegalizerInfo).
+define float @test_vector_reduce_fmin_v4float(<4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v1 :: v_dual_min_num_f32 v1, v2, v3
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call float @llvm.vector.reduce.fmin.v4float(<4 x float> %v)
+  ret float %res
+}
+
+; Reduce <8 x float>: three levels of pairwise v_min (4 -> 2 -> 1) after the
+; per-lane self-combination (v_mul 1.0 on GFX7/8, self-max on GFX9+;
+; presumably NaN canonicalization — confirm against AMDGPULegalizerInfo).
+define float @test_vector_reduce_fmin_v8float(<8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX7-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX8-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v4
+; GFX9-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX9-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v5
+; GFX10-NEXT: v_max_f32_e32 v6, v6, v6
+; GFX10-NEXT: v_max_f32_e32 v7, v7, v7
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: v_dual_max_f32 v4, v4, v4 :: v_dual_max_f32 v5, v5, v5
+; GFX11-NEXT: v_dual_max_f32 v6, v6, v6 :: v_dual_max_f32 v7, v7, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GFX11-NEXT: v_dual_min_f32 v2, v4, v5 :: v_dual_min_f32 v3, v6, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v5, v5, v5
+; GFX12-NEXT: v_dual_max_num_f32 v6, v6, v6 :: v_dual_max_num_f32 v7, v7, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v1 :: v_dual_min_num_f32 v1, v2, v3
+; GFX12-NEXT: v_dual_min_num_f32 v2, v4, v5 :: v_dual_min_num_f32 v3, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v1 :: v_dual_min_num_f32 v1, v2, v3
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call float @llvm.vector.reduce.fmin.v8float(<8 x float> %v)
+  ret float %res
+}
+
+; Reduce <16 x float>: four levels of pairwise v_min (8 -> 4 -> 2 -> 1)
+; after the per-lane self-combination (v_mul 1.0 on GFX7/8, self-max on
+; GFX9+; presumably NaN canonicalization — confirm against
+; AMDGPULegalizerInfo). GFX10+ schedules differ only in register
+; allocation and dual-issue pairing.
+define float @test_vector_reduce_fmin_v16float(<16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX7-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX7-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v8
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v9
+; GFX7-NEXT: v_min_f32_e32 v4, v4, v5
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v10
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v11
+; GFX7-NEXT: v_min_f32_e32 v5, v5, v6
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v12
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v13
+; GFX7-NEXT: v_min_f32_e32 v6, v6, v7
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v14
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v15
+; GFX7-NEXT: v_min_f32_e32 v7, v7, v8
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, 1.0, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; GFX8-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v5
+; GFX8-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 1.0, v6
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v7
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v8
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v9
+; GFX8-NEXT: v_min_f32_e32 v4, v4, v5
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v10
+; GFX8-NEXT: v_mul_f32_e32 v6, 1.0, v11
+; GFX8-NEXT: v_min_f32_e32 v5, v5, v6
+; GFX8-NEXT: v_mul_f32_e32 v6, 1.0, v12
+; GFX8-NEXT: v_mul_f32_e32 v7, 1.0, v13
+; GFX8-NEXT: v_min_f32_e32 v6, v6, v7
+; GFX8-NEXT: v_mul_f32_e32 v7, 1.0, v14
+; GFX8-NEXT: v_mul_f32_e32 v8, 1.0, v15
+; GFX8-NEXT: v_min_f32_e32 v7, v7, v8
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v3, v3
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v4, v4
+; GFX9-NEXT: v_max_f32_e32 v3, v5, v5
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v3
+; GFX9-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX9-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX9-NEXT: v_max_f32_e32 v4, v8, v8
+; GFX9-NEXT: v_max_f32_e32 v5, v9, v9
+; GFX9-NEXT: v_min_f32_e32 v4, v4, v5
+; GFX9-NEXT: v_max_f32_e32 v5, v10, v10
+; GFX9-NEXT: v_max_f32_e32 v6, v11, v11
+; GFX9-NEXT: v_min_f32_e32 v5, v5, v6
+; GFX9-NEXT: v_max_f32_e32 v6, v12, v12
+; GFX9-NEXT: v_max_f32_e32 v7, v13, v13
+; GFX9-NEXT: v_min_f32_e32 v6, v6, v7
+; GFX9-NEXT: v_max_f32_e32 v7, v14, v14
+; GFX9-NEXT: v_max_f32_e32 v8, v15, v15
+; GFX9-NEXT: v_min_f32_e32 v7, v7, v8
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
+; GFX10-NEXT: v_max_f32_e32 v1, v1, v1
+; GFX10-NEXT: v_max_f32_e32 v2, v2, v2
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f32_e32 v4, v4, v4
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v5
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_max_f32_e32 v3, v6, v6
+; GFX10-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_max_f32_e32 v4, v7, v7
+; GFX10-NEXT: v_max_f32_e32 v5, v8, v8
+; GFX10-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX10-NEXT: v_max_f32_e32 v7, v10, v10
+; GFX10-NEXT: v_max_f32_e32 v8, v11, v11
+; GFX10-NEXT: v_max_f32_e32 v9, v12, v12
+; GFX10-NEXT: v_max_f32_e32 v10, v13, v13
+; GFX10-NEXT: v_max_f32_e32 v11, v14, v14
+; GFX10-NEXT: v_max_f32_e32 v12, v15, v15
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v4
+; GFX10-NEXT: v_min_f32_e32 v4, v5, v6
+; GFX10-NEXT: v_min_f32_e32 v5, v7, v8
+; GFX10-NEXT: v_min_f32_e32 v6, v9, v10
+; GFX10-NEXT: v_min_f32_e32 v7, v11, v12
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GFX11-NEXT: v_dual_max_f32 v2, v2, v2 :: v_dual_max_f32 v3, v3, v3
+; GFX11-NEXT: v_dual_max_f32 v4, v4, v4 :: v_dual_max_f32 v5, v5, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v1 :: v_dual_min_f32 v1, v2, v3
+; GFX11-NEXT: v_dual_max_f32 v3, v6, v6 :: v_dual_min_f32 v2, v4, v5
+; GFX11-NEXT: v_dual_max_f32 v4, v7, v7 :: v_dual_max_f32 v5, v8, v8
+; GFX11-NEXT: v_dual_max_f32 v6, v9, v9 :: v_dual_max_f32 v7, v10, v10
+; GFX11-NEXT: v_dual_max_f32 v8, v11, v11 :: v_dual_max_f32 v9, v12, v12
+; GFX11-NEXT: v_dual_max_f32 v10, v13, v13 :: v_dual_max_f32 v11, v14, v14
+; GFX11-NEXT: v_max_f32_e32 v12, v15, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_min_f32 v3, v3, v4 :: v_dual_min_f32 v4, v5, v6
+; GFX11-NEXT: v_dual_min_f32 v5, v7, v8 :: v_dual_min_f32 v6, v9, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_min_f32 v7, v11, v12 :: v_dual_min_f32 v0, v0, v1
+; GFX11-NEXT: v_dual_min_f32 v1, v2, v3 :: v_dual_min_f32 v2, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v3, v6, v7 :: v_dual_min_f32 v0, v0, v1
+; GFX11-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GFX12-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3
+; GFX12-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v5, v5, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_min_num_f32 v0, v0, v1 :: v_dual_min_num_f32 v1, v2, v3
+; GFX12-NEXT: v_dual_max_num_f32 v3, v6, v6 :: v_dual_min_num_f32 v2, v4, v5
+; GFX12-NEXT: v_dual_max_num_f32 v4, v7, v7 :: v_dual_max_num_f32 v5, v8, v8
+; GFX12-NEXT: v_dual_max_num_f32 v6, v9, v9 :: v_dual_max_num_f32 v7, v10, v10
+; GFX12-NEXT: v_dual_max_num_f32 v8, v11, v11 :: v_dual_max_num_f32 v9, v12, v12
+; GFX12-NEXT: v_dual_max_num_f32 v10, v13, v13 :: v_dual_max_num_f32 v11, v14, v14
+; GFX12-NEXT: v_max_num_f32_e32 v12, v15, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_dual_min_num_f32 v3, v3, v4 :: v_dual_min_num_f32 v4, v5, v6
+; GFX12-NEXT: v_dual_min_num_f32 v5, v7, v8 :: v_dual_min_num_f32 v6, v9, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_dual_min_num_f32 v7, v11, v12 :: v_dual_min_num_f32 v0, v0, v1
+; GFX12-NEXT: v_dual_min_num_f32 v1, v2, v3 :: v_dual_min_num_f32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_dual_min_num_f32 v3, v6, v7 :: v_dual_min_num_f32 v0, v0, v1
+; GFX12-NEXT: v_min_num_f32_e32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call float @llvm.vector.reduce.fmin.v16float(<16 x float> %v)
+  ret float %res
+}
+
+; Reduce <2 x double>: unlike the f32 cases, every target including GFX7/8
+; uses a self v_max_f64 on each operand pair (presumably NaN canonicalization
+; — confirm against AMDGPULegalizerInfo) followed by a single v_min_f64.
+; Doubles occupy VGPR pairs, hence the v[0:1]/v[2:3] operands.
+define double @test_vector_reduce_fmin_v2double(<2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmin_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT: v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+  %res = call double @llvm.vector.reduce.fmin.v2double(<2 x double> %v)
+  ret double %res
+}
+
+define double @test_vector_reduce_fmin_v3double(<3 x double> %v) { ; odd element count lowers to a sequential v_min chain (no balanced tree possible)
+; GFX7-LABEL: test_vector_reduce_fmin_v3double:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v3double:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v3double:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v3double:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v3double:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v3double:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT:    v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT:    v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call double @llvm.vector.reduce.fmin.v3double(<3 x double> %v) ; non-power-of-two width exercises the sequential lowering path
+  ret double %res
+}
+
+define double @test_vector_reduce_fmin_v4double(<4 x double> %v) { ; power-of-two width: pairwise v_min tree (2 leaf mins + 1 root min) after canonicalizing inputs
+; GFX7-LABEL: test_vector_reduce_fmin_v4double:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v4double:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v4double:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT:    v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[2:3], v[4:5]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v4double:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v4double:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v4double:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT:    v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT:    v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT:    v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call double @llvm.vector.reduce.fmin.v4double(<4 x double> %v)
+  ret double %res
+}
+
+define double @test_vector_reduce_fmin_v8double(<8 x double> %v) { ; 8-wide tree: 4 leaf mins, 2 mid mins, 1 root min; inputs canonicalized via v_max x,x
+; GFX7-LABEL: test_vector_reduce_fmin_v8double:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX7-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX7-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX7-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v8double:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX8-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX8-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX8-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v8double:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[4:5], v[4:5]
+; GFX9-NEXT:    v_max_f64 v[4:5], v[6:7], v[6:7]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[2:3], v[4:5]
+; GFX9-NEXT:    v_max_f64 v[4:5], v[8:9], v[8:9]
+; GFX9-NEXT:    v_max_f64 v[6:7], v[10:11], v[10:11]
+; GFX9-NEXT:    v_min_f64 v[4:5], v[4:5], v[6:7]
+; GFX9-NEXT:    v_max_f64 v[6:7], v[12:13], v[12:13]
+; GFX9-NEXT:    v_max_f64 v[8:9], v[14:15], v[14:15]
+; GFX9-NEXT:    v_min_f64 v[6:7], v[6:7], v[8:9]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v8double:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX10-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX10-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX10-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v8double:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX11-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX11-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX11-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v8double:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT:    v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT:    v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT:    v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT:    v_max_num_f64_e32 v[8:9], v[8:9], v[8:9]
+; GFX12-NEXT:    v_max_num_f64_e32 v[10:11], v[10:11], v[10:11]
+; GFX12-NEXT:    v_max_num_f64_e32 v[12:13], v[12:13], v[12:13]
+; GFX12-NEXT:    v_max_num_f64_e32 v[14:15], v[14:15], v[14:15]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    v_min_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT:    v_min_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call double @llvm.vector.reduce.fmin.v8double(<8 x double> %v)
+  ret double %res
+}
+
+define double @test_vector_reduce_fmin_v16double(<16 x double> %v) { ; 32 dwords exceed the VGPR argument budget: the last dword (v31) is loaded from the stack, guarded by a vm/load waitcnt before use
+; GFX7-LABEL: test_vector_reduce_fmin_v16double:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX7-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX7-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX7-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX7-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX7-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX7-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX7-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX7-NEXT:    v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX7-NEXT:    v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX7-NEXT:    v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX7-NEXT:    v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX7-NEXT:    v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX7-NEXT:    v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX7-NEXT:    v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT:    v_min_f64 v[8:9], v[16:17], v[18:19]
+; GFX7-NEXT:    v_min_f64 v[10:11], v[20:21], v[22:23]
+; GFX7-NEXT:    v_min_f64 v[12:13], v[24:25], v[26:27]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX7-NEXT:    v_min_f64 v[14:15], v[28:29], v[30:31]
+; GFX7-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX7-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX7-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmin_v16double:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX8-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX8-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX8-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX8-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX8-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX8-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX8-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX8-NEXT:    v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX8-NEXT:    v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX8-NEXT:    v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX8-NEXT:    v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX8-NEXT:    v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX8-NEXT:    v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX8-NEXT:    v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT:    v_min_f64 v[8:9], v[16:17], v[18:19]
+; GFX8-NEXT:    v_min_f64 v[10:11], v[20:21], v[22:23]
+; GFX8-NEXT:    v_min_f64 v[12:13], v[24:25], v[26:27]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX8-NEXT:    v_min_f64 v[14:15], v[28:29], v[30:31]
+; GFX8-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX8-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX8-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmin_v16double:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    scratch_load_dword v31, off, s32
+; GFX9-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX9-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX9-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX9-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX9-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX9-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX9-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX9-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT:    v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX9-NEXT:    v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX9-NEXT:    v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX9-NEXT:    v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX9-NEXT:    v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX9-NEXT:    v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX9-NEXT:    v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[8:9], v[16:17], v[18:19]
+; GFX9-NEXT:    v_min_f64 v[10:11], v[20:21], v[22:23]
+; GFX9-NEXT:    v_min_f64 v[12:13], v[24:25], v[26:27]
+; GFX9-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_max_f64 v[2:3], v[30:31], v[30:31]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[28:29], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[12:13], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[2:3], v[4:5], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmin_v16double:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX10-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX10-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX10-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX10-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX10-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX10-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX10-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX10-NEXT:    v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX10-NEXT:    v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX10-NEXT:    v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX10-NEXT:    v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX10-NEXT:    v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX10-NEXT:    v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX10-NEXT:    v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT:    v_min_f64 v[8:9], v[16:17], v[18:19]
+; GFX10-NEXT:    v_min_f64 v[10:11], v[20:21], v[22:23]
+; GFX10-NEXT:    v_min_f64 v[12:13], v[24:25], v[26:27]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX10-NEXT:    v_min_f64 v[14:15], v[28:29], v[30:31]
+; GFX10-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX10-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX10-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmin_v16double:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v31, off, s32
+; GFX11-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; GFX11-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; GFX11-NEXT:    v_max_f64 v[4:5], v[4:5], v[4:5]
+; GFX11-NEXT:    v_max_f64 v[6:7], v[6:7], v[6:7]
+; GFX11-NEXT:    v_max_f64 v[8:9], v[8:9], v[8:9]
+; GFX11-NEXT:    v_max_f64 v[10:11], v[10:11], v[10:11]
+; GFX11-NEXT:    v_max_f64 v[12:13], v[12:13], v[12:13]
+; GFX11-NEXT:    v_max_f64 v[14:15], v[14:15], v[14:15]
+; GFX11-NEXT:    v_max_f64 v[16:17], v[16:17], v[16:17]
+; GFX11-NEXT:    v_max_f64 v[18:19], v[18:19], v[18:19]
+; GFX11-NEXT:    v_max_f64 v[20:21], v[20:21], v[20:21]
+; GFX11-NEXT:    v_max_f64 v[22:23], v[22:23], v[22:23]
+; GFX11-NEXT:    v_max_f64 v[24:25], v[24:25], v[24:25]
+; GFX11-NEXT:    v_max_f64 v[26:27], v[26:27], v[26:27]
+; GFX11-NEXT:    v_max_f64 v[28:29], v[28:29], v[28:29]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT:    v_min_f64 v[8:9], v[16:17], v[18:19]
+; GFX11-NEXT:    v_min_f64 v[10:11], v[20:21], v[22:23]
+; GFX11-NEXT:    v_min_f64 v[12:13], v[24:25], v[26:27]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_max_f64 v[30:31], v[30:31], v[30:31]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_min_f64 v[14:15], v[28:29], v[30:31]
+; GFX11-NEXT:    v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX11-NEXT:    v_min_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmin_v16double:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v31, off, s32
+; GFX12-NEXT:    v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
+; GFX12-NEXT:    v_max_num_f64_e32 v[2:3], v[2:3], v[2:3]
+; GFX12-NEXT:    v_max_num_f64_e32 v[4:5], v[4:5], v[4:5]
+; GFX12-NEXT:    v_max_num_f64_e32 v[6:7], v[6:7], v[6:7]
+; GFX12-NEXT:    v_max_num_f64_e32 v[8:9], v[8:9], v[8:9]
+; GFX12-NEXT:    v_max_num_f64_e32 v[10:11], v[10:11], v[10:11]
+; GFX12-NEXT:    v_max_num_f64_e32 v[12:13], v[12:13], v[12:13]
+; GFX12-NEXT:    v_max_num_f64_e32 v[14:15], v[14:15], v[14:15]
+; GFX12-NEXT:    v_max_num_f64_e32 v[16:17], v[16:17], v[16:17]
+; GFX12-NEXT:    v_max_num_f64_e32 v[18:19], v[18:19], v[18:19]
+; GFX12-NEXT:    v_max_num_f64_e32 v[20:21], v[20:21], v[20:21]
+; GFX12-NEXT:    v_max_num_f64_e32 v[22:23], v[22:23], v[22:23]
+; GFX12-NEXT:    v_max_num_f64_e32 v[24:25], v[24:25], v[24:25]
+; GFX12-NEXT:    v_max_num_f64_e32 v[26:27], v[26:27], v[26:27]
+; GFX12-NEXT:    v_max_num_f64_e32 v[28:29], v[28:29], v[28:29]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    v_min_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT:    v_min_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT:    v_min_num_f64_e32 v[8:9], v[16:17], v[18:19]
+; GFX12-NEXT:    v_min_num_f64_e32 v[10:11], v[20:21], v[22:23]
+; GFX12-NEXT:    v_min_num_f64_e32 v[12:13], v[24:25], v[26:27]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_min_num_f64_e32 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    v_max_num_f64_e32 v[30:31], v[30:31], v[30:31]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_min_num_f64_e32 v[14:15], v[28:29], v[30:31]
+; GFX12-NEXT:    v_min_num_f64_e32 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_min_num_f64_e32 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT:    v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call double @llvm.vector.reduce.fmin.v16double(<16 x double> %v)
+  ret double %res
+}
+
+declare half @llvm.vector.reduce.fmin.v2half(<2 x half>) ; NOTE(review): suffixes like "v2half"/"v2float" are non-canonical manglings (canonical: v2f16/v2f32); presumably accepted via intrinsic remangling -- confirm against LangRef
+declare half @llvm.vector.reduce.fmin.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmin.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmin.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmin.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmin.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmin.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmin.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmin.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmin.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmin.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmin.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmin.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmin.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmin.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fminimum.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fminimum.ll
new file mode 100644
index 0000000000000..db90d07a0b0bf
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fminimum.ll
@@ -0,0 +1,2675 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+define half @test_vector_reduce_fminimum_v2half(<2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x8000
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v1
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, 0
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, 0x7e00, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fminimum.v2half(<2 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fminimum_v3half(<3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v0
+; GFX7-NEXT: v_mov_b32_e32 v8, 0x8000
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, 0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v8
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v1, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v1, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_min_f16_e32 v3, v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, s4
+; GFX10-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v3, v0, v2
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, 0x7e00, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fminimum.v3half(<3 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fminimum_v4half(<4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v1
+; GFX7-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v0
+; GFX7-NEXT: v_mov_b32_e32 v9, 0x8000
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, 0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v9
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v7, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX8-NEXT: v_min_f16_e32 v4, v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v1, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v5, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: v_min_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v3, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0x7e00, s4
+; GFX10-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f16_e32 v4, v0, v2
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_min_f16_e32 v5, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fminimum.v4half(<4 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fminimum_v8half(<8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v11, 0x7e00
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v0
+; GFX7-NEXT: v_mov_b32_e32 v13, 0x8000
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v10, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, 0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v14, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v15, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v14, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v14, v5
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v13
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v8, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v12, v14
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v14
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v13
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v4, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v9, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v6, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v9, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v13
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v6, vcc
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v13
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v7, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v7, v8
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v11, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v3, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_min_f16_e32 v8, v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v9, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v4, v1, v5
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v4, v2, v6
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v4, v3, v7
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v3, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_sdwa v4, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_min_f16_sdwa v4, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX9-NEXT: v_min_f16_sdwa v4, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
+; GFX9-NEXT: v_min_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
+; GFX9-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f16_sdwa v4, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v5, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v6, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v7, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, 0x7e00, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX10-NEXT: v_min_f16_e32 v5, v2, v3
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX10-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_min_f16_e32 v8, v0, v4
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v4
+; GFX11-NEXT: v_min_f16_e32 v9, v1, v5
+; GFX11-NEXT: v_min_f16_e32 v10, v2, v6
+; GFX11-NEXT: v_min_f16_e32 v11, v3, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v3, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_min_f16_e32 v5, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v4
+; GFX12-NEXT: v_minimum_f16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_minimum_f16 v2, v2, v6
+; GFX12-NEXT: v_minimum_f16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: v_minimum_f16 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fminimum.v8half(<8 x half> %v)
+ ret half %res
+}
+
+define half @test_vector_reduce_fminimum_v16half(<16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v3
+; GFX7-NEXT: v_mov_b32_e32 v16, 0x7e00
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v19, v3, v2, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v17, v18
+; GFX7-NEXT: v_cvt_f32_f16_e32 v17, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v1
+; GFX7-NEXT: v_cmp_lt_f32_e64 s[4:5], v17, v18
+; GFX7-NEXT: v_cndmask_b32_e64 v20, v1, v0, s[4:5]
+; GFX7-NEXT: v_cmp_u_f32_e64 s[4:5], v17, v18
+; GFX7-NEXT: v_cndmask_b32_e64 v18, v20, v16, s[4:5]
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff, v0
+; GFX7-NEXT: v_mov_b32_e32 v17, 0x8000
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], v20, v17
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff, v1
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v18, v0, s[4:5]
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], v20, v17
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v0, v1, s[4:5]
+; GFX7-NEXT: v_cvt_f32_f16_e32 v20, v18
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, 0
+; GFX7-NEXT: v_cmp_eq_f32_e64 s[4:5], v20, v0
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v18, v1, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v18, v19, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v19, v17
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v19, v17
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v19, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v3, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v18, v5, v4, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v18, v16, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v19, v17
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v3, v4, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v19, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v7
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v18, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff, v6
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v5, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v7, v6, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v5, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v18, v17
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v4, v6, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v18, v17
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v9, v8, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v19, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v17
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v19, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v5, v8, vcc
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v9
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v6, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v18, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v11, v10, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v18, v19
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v6, v10, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v18, v13
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v11
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v11, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v7, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v10, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v13, v12, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v18
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v16, vcc
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v7, v12, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v15
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v13
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v10, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v8, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v15, v14, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v8
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v2
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v10, v17
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v8, v14, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v11, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v10, v10, v15, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v9
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v13, v4
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v11, v17
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v11, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v10, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v2
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v12, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v10, v17
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v10, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v9, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v11, v12
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v17
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v6
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v5, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v9, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v8, v7, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v4
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v17
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v4, v7, vcc
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v8
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v9, v10
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v9, v4
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v17
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v6, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v2
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v17
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v2, v3, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v5, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v2
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v2
+; GFX7-NEXT: v_cmp_lt_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v2, v1, vcc
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v3, v4
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v16, vcc
+; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v3
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v17
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX7-NEXT: v_cmp_eq_f32_e32 vcc, v4, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_min_f16_e32 v16, v0, v8
+; GFX8-NEXT: v_mov_b32_e32 v17, 0x7e00
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v1, v9
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v1, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v2, v10
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v10
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v3, v11
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v3, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v4, v12
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v4, v12
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v5, v13
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v5, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v6, v14
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v6, v14
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v7, v15
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v7, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v8, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_sdwa v8, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7e00
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v4, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v5, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v6, v6 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_sdwa v8, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cmp_u_f16_sdwa vcc, v7, v7 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v8, v9, vcc
+; GFX9-NEXT: v_min_f16_e32 v8, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: v_min_f16_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f16_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v4, v5
+; GFX9-NEXT: v_min_f16_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f16_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f16_sdwa v8, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v0, v0 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v9, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v1, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v8, v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v2, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v9, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v3, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v8, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v4, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v9, v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v5, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v8, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, 0x7e00, s4
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v6, v6 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_sdwa v9, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v8, 0x7e00, s4
+; GFX10-NEXT: v_min_f16_e32 v8, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_sdwa s4, v7, v7 src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_min_f16_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v9, 0x7e00, s4
+; GFX10-NEXT: v_min_f16_e32 v9, v2, v3
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_min_f16_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v9, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_min_f16_e32 v4, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_min_f16_e32 v5, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX10-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX11-NEXT: v_min_f16_e32 v14, v0, v8
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v8
+; GFX11-NEXT: v_min_f16_e32 v17, v1, v9
+; GFX11-NEXT: v_min_f16_e32 v8, v2, v10
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v6
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v14, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v9
+; GFX11-NEXT: v_min_f16_e32 v9, v3, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v16, 16, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v17, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v10
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v3, v11
+; GFX11-NEXT: v_min_f16_e32 v8, v4, v12
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v12
+; GFX11-NEXT: v_min_f16_e32 v9, v5, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v5, v13
+; GFX11-NEXT: v_min_f16_e32 v8, v6, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v15
+; GFX11-NEXT: v_min_f16_e32 v9, v7, v16
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v7, v16
+; GFX11-NEXT: v_min_f16_e32 v8, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_min_f16_e32 v9, v2, v3
+; GFX11-NEXT: v_min_f16_e32 v1, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_min_f16_e32 v8, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v9, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f16_e32 v4, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_min_f16_e32 v5, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7e00, vcc_lo
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7e00, vcc_lo
+; GFX11-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f16_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7e00, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v7
+; GFX12-NEXT: v_minimum_f16 v0, v0, v8
+; GFX12-NEXT: v_minimum_f16 v1, v1, v9
+; GFX12-NEXT: v_minimum_f16 v2, v2, v10
+; GFX12-NEXT: v_minimum_f16 v3, v3, v11
+; GFX12-NEXT: v_minimum_f16 v4, v4, v12
+; GFX12-NEXT: v_minimum_f16 v5, v5, v13
+; GFX12-NEXT: v_minimum_f16 v6, v6, v14
+; GFX12-NEXT: v_minimum_f16 v7, v7, v15
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: v_minimum_f16 v1, v2, v3
+; GFX12-NEXT: v_minimum_f16 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_minimum_f16 v3, v6, v7
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f16 v1, v2, v3
+; GFX12-NEXT: v_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fminimum.v16half(<16 x half> %v)
+ ret half %res
+}
+
+define float @test_vector_reduce_fminimum_v2float(<2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fminimum.v2float(<2 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fminimum_v3float(<3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v2
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v3, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fminimum.v3float(<3 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fminimum_v4float(<4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v5, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_min_f32 v4, v0, v1 :: v_dual_min_f32 v5, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fminimum.v4float(<4 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fminimum_v8float(<8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v9, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v9, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v9, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_min_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_min_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_min_f32 v8, v0, v1 :: v_dual_min_f32 v9, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_min_f32_e32 v1, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v8, v6, v7
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v9, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v4, v0, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f32_e32 v5, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: v_minimum_f32 v2, v4, v5
+; GFX12-NEXT: v_minimum_f32 v3, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fminimum.v8float(<8 x float> %v)
+ ret float %res
+}
+
+; Reduce <16 x float> with @llvm.vector.reduce.fminimum (NaN-propagating
+; minimum, per LangRef). Pre-GFX12 targets lack a native f32 "minimum"
+; instruction, so each pairwise step lowers to v_min_f32 plus an unordered
+; compare (v_cmp_u_f32) that selects the canonical f32 quiet NaN
+; (0x7fc00000) when either operand is NaN; GFX12 uses native v_minimum_f32.
+; NOTE(review): CHECK lines below are autogenerated — do not hand-edit.
+define float @test_vector_reduce_fminimum_v16float(<16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f32_e32 v16, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v4, v8, v9
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v5, v10, v11
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v6, v12, v13
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v7, v14, v15
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX7-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX7-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f32_e32 v16, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v4, v8, v9
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v5, v10, v11
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v6, v12, v13
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v7, v14, v15
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX8-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v16, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v17, 0x7fc00000
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v4, v8, v9
+; GFX9-NEXT: v_min_f32_e32 v5, v10, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: v_min_f32_e32 v6, v12, v13
+; GFX9-NEXT: v_min_f32_e32 v7, v14, v15
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v9
+; GFX9-NEXT: v_min_f32_e32 v8, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v11
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v13
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v15
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v5
+; GFX9-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v7
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v17, vcc
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v3
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v1
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v17, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f32_e32 v16, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v17, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v1, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v16, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v17, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_min_f32_e32 v4, v8, v9
+; GFX10-NEXT: v_min_f32_e32 v5, v10, v11
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_min_f32_e32 v6, v12, v13
+; GFX10-NEXT: v_min_f32_e32 v7, v14, v15
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v9
+; GFX10-NEXT: v_min_f32_e32 v8, v0, v2
+; GFX10-NEXT: v_min_f32_e32 v9, v1, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v11
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v13
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v15
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX10-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX10-NEXT: v_min_f32_e32 v8, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX10-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_min_f32_e32 v5, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_min_f32 v16, v0, v1 :: v_dual_min_f32 v17, v2, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_min_f32_e32 v1, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v16, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: v_min_f32_e32 v3, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v17, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_dual_min_f32 v4, v8, v9 :: v_dual_min_f32 v5, v10, v11
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_dual_min_f32 v6, v12, v13 :: v_dual_min_f32 v7, v14, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v9
+; GFX11-NEXT: v_min_f32_e32 v8, v0, v2
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v11
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v13
+; GFX11-NEXT: v_min_f32_e32 v9, v1, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v15
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v2
+; GFX11-NEXT: v_min_f32_e32 v2, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v3
+; GFX11-NEXT: v_min_f32_e32 v8, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v9, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v5
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v7
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v8, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v4, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f32_e32 v5, v2, v3
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7fc00000, vcc_lo
+; GFX11-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0x7fc00000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: v_minimum_f32 v2, v4, v5
+; GFX12-NEXT: v_minimum_f32 v3, v6, v7
+; GFX12-NEXT: v_minimum_f32 v4, v8, v9
+; GFX12-NEXT: v_minimum_f32 v5, v10, v11
+; GFX12-NEXT: v_minimum_f32 v6, v12, v13
+; GFX12-NEXT: v_minimum_f32 v7, v14, v15
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: v_minimum_f32 v2, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_minimum_f32 v3, v6, v7
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f32 v1, v2, v3
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fminimum.v16float(<16 x float> %v)
+ ret float %res
+}
+
+
+; Reduce <2 x double> with @llvm.vector.reduce.fminimum. Pre-GFX12 lowering:
+; v_min_f64 plus an unordered f64 compare selecting the canonical f64 quiet
+; NaN on NaN inputs — cndmask writes the two 32-bit halves separately
+; (low word 0, high word 0x7ff80000). GFX12 has native v_minimum_f64.
+; NOTE(review): CHECK lines below are autogenerated — do not hand-edit.
+define double @test_vector_reduce_fminimum_v2double(<2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fminimum.v2double(<2 x double> %v)
+ ret double %res
+}
+
+; Reduce <3 x double>: odd element count forces a sequential (left-to-right)
+; chain of two fminimum steps rather than a balanced tree. Pre-GFX12 each
+; step is v_min_f64 + v_cmp_u_f64 + per-half cndmask producing the f64
+; canonical quiet NaN (0x7ff80000:00000000) on NaN; GFX12 uses v_minimum_f64.
+; NOTE(review): CHECK lines below are autogenerated — do not hand-edit.
+define double @test_vector_reduce_fminimum_v3double(<3 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v7, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v7, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v3double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fminimum.v3double(<3 x double> %v)
+ ret double %res
+}
+
+; Reduce <4 x double> as a balanced tree (two leaf steps, one final step).
+; Pre-GFX12 each step is v_min_f64 + unordered compare + per-half cndmask
+; selecting the f64 canonical quiet NaN; note GFX7/8/10/11 keep the second
+; leaf compare in an SGPR pair/scalar condition (v_cmp_u_f64_e64 into
+; s[4:5]/s4/s0) so both leaves can be computed before either select.
+; GFX12 lowers each step to native v_minimum_f64.
+; NOTE(review): CHECK lines below are autogenerated — do not hand-edit.
+define double @test_vector_reduce_fminimum_v4double(<4 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v9, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v6, s[4:5]
+; GFX7-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v6, s[4:5]
+; GFX8-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v10, vcc
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v10, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v4double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fminimum.v4double(<4 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fminimum_v8double(<8 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_f64 v[16:17], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX7-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[8:9], v[10:11]
+; GFX7-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[12:13], v[14:15]
+; GFX7-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v18, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v18, s[8:9]
+; GFX7-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[0:1]
+; GFX7-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v9, v18, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX7-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_f64 v[16:17], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[4:5], v[6:7]
+; GFX8-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[8:9], v[10:11]
+; GFX8-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[12:13], v[14:15]
+; GFX8-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v18, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v18, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v18, s[8:9]
+; GFX8-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[0:1]
+; GFX8-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v18, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v18, s[4:5]
+; GFX8-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[16:17], v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: v_min_f64 v[4:5], v[8:9], v[10:11]
+; GFX9-NEXT: v_min_f64 v[6:7], v[12:13], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_min_f64 v[8:9], v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_min_f64 v[2:3], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v18, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v18, vcc
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v18, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_f64 v[16:17], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[8:9], v[10:11]
+; GFX10-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[12:13], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX10-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_f64 v[16:17], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[8:9], v[10:11]
+; GFX11-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[12:13], v[14:15]
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v8double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_minimum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_minimum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fminimum.v8double(<8 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fminimum_v16double(<16 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_min_f64 v[32:33], v[4:5], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX7-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[12:13], v[14:15]
+; GFX7-NEXT: v_min_f64 v[12:13], v[8:9], v[10:11]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[8:9], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v34, 0x7ff80000
+; GFX7-NEXT: v_min_f64 v[6:7], v[16:17], v[18:19]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[16:17], v[18:19]
+; GFX7-NEXT: v_min_f64 v[8:9], v[20:21], v[22:23]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[10:11], v[20:21], v[22:23]
+; GFX7-NEXT: v_min_f64 v[10:11], v[0:1], v[2:3]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[12:13], v[0:1], v[2:3]
+; GFX7-NEXT: v_min_f64 v[0:1], v[24:25], v[26:27]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[14:15], v[24:25], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v13, v34, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v12, v32, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v33, v34, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v34, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v7, v7, v34, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v8, v8, 0, s[10:11]
+; GFX7-NEXT: v_cndmask_b32_e64 v9, v9, v34, s[10:11]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v10, 0, s[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v11, v34, s[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[14:15]
+; GFX7-NEXT: v_min_f64 v[18:19], v[2:3], v[4:5]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX7-NEXT: v_min_f64 v[2:3], v[6:7], v[8:9]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[8:9], v[6:7], v[8:9]
+; GFX7-NEXT: v_min_f64 v[14:15], v[10:11], v[12:13]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[6:7], v[10:11], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v34, s[8:9]
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v14, 0, s[6:7]
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_min_f64 v[16:17], v[28:29], v[30:31]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[28:29], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v16, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v17, v34, vcc
+; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[5:6]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[5:6]
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v15, v34, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v18, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v19, v34, s[4:5]
+; GFX7-NEXT: v_min_f64 v[9:10], v[4:5], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v6, v7, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v8, v34, vcc
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX7-NEXT: v_min_f64 v[0:1], v[2:3], v[6:7]
+; GFX7-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v9, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v10, v34, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[4:5]
+; GFX7-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX7-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v34, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_min_f64 v[32:33], v[4:5], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX8-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[12:13], v[14:15]
+; GFX8-NEXT: v_min_f64 v[12:13], v[8:9], v[10:11]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[8:9], v[10:11]
+; GFX8-NEXT: v_mov_b32_e32 v34, 0x7ff80000
+; GFX8-NEXT: v_min_f64 v[6:7], v[16:17], v[18:19]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[16:17], v[18:19]
+; GFX8-NEXT: v_min_f64 v[8:9], v[20:21], v[22:23]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[10:11], v[20:21], v[22:23]
+; GFX8-NEXT: v_min_f64 v[10:11], v[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[12:13], v[0:1], v[2:3]
+; GFX8-NEXT: v_min_f64 v[0:1], v[24:25], v[26:27]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[14:15], v[24:25], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v12, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v13, v34, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v12, v32, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v33, v34, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v34, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v34, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, 0, s[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v9, v34, s[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v10, 0, s[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v34, s[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[14:15]
+; GFX8-NEXT: v_min_f64 v[18:19], v[2:3], v[4:5]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[6:7], v[8:9]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[8:9], v[6:7], v[8:9]
+; GFX8-NEXT: v_min_f64 v[14:15], v[10:11], v[12:13]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[6:7], v[10:11], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v34, s[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v14, 0, s[6:7]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_min_f64 v[16:17], v[28:29], v[30:31]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[28:29], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v16, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v34, vcc
+; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[5:6]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[5:6]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v15, v34, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v18, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v19, v34, s[4:5]
+; GFX8-NEXT: v_min_f64 v[9:10], v[4:5], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v7, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v34, vcc
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_min_f64 v[0:1], v[2:3], v[6:7]
+; GFX8-NEXT: v_cmp_u_f64_e64 s[4:5], v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v9, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v10, v34, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v34, s[4:5]
+; GFX8-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v34, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_mov_b32_e32 v54, 0x7ff80000
+; GFX9-NEXT: v_min_f64 v[32:33], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_min_f64 v[34:35], v[4:5], v[6:7]
+; GFX9-NEXT: v_min_f64 v[36:37], v[8:9], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v32, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v33, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: v_min_f64 v[38:39], v[12:13], v[14:15]
+; GFX9-NEXT: v_min_f64 v[48:49], v[16:17], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v34, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v35, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_min_f64 v[50:51], v[20:21], v[22:23]
+; GFX9-NEXT: v_min_f64 v[52:53], v[24:25], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v36, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v37, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[14:15]
+; GFX9-NEXT: v_min_f64 v[14:15], v[0:1], v[2:3]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_u_f64_e64 s[0:1], v[28:29], v[30:31]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v38, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v39, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[16:17], v[18:19]
+; GFX9-NEXT: v_min_f64 v[16:17], v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v48, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v49, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[20:21], v[22:23]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v50, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v51, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[24:25], v[26:27]
+; GFX9-NEXT: v_min_f64 v[18:19], v[8:9], v[10:11]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v52, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v53, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v14, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v15, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v16, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v17, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[10:11]
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v18, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v19, v54, vcc
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_min_f64 v[2:3], v[28:29], v[30:31]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v54, s[0:1]
+; GFX9-NEXT: v_min_f64 v[8:9], v[12:13], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 s[0:1], v[12:13], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v54, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v8, 0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v9, v54, s[0:1]
+; GFX9-NEXT: v_min_f64 v[6:7], v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v54, vcc
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v54, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_min_f64 v[32:33], v[0:1], v[2:3]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[4:5], v[6:7]
+; GFX10-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[8:9], v[10:11]
+; GFX10-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[12:13], v[14:15]
+; GFX10-NEXT: v_min_f64 v[6:7], v[16:17], v[18:19]
+; GFX10-NEXT: v_cmp_u_f64_e64 s7, v[16:17], v[18:19]
+; GFX10-NEXT: v_min_f64 v[8:9], v[20:21], v[22:23]
+; GFX10-NEXT: v_cmp_u_f64_e64 s8, v[20:21], v[22:23]
+; GFX10-NEXT: v_min_f64 v[10:11], v[24:25], v[26:27]
+; GFX10-NEXT: v_cmp_u_f64_e64 s9, v[24:25], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v32, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v33, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, 0x7ff80000, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, 0, s8
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, 0x7ff80000, s8
+; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, 0, s9
+; GFX10-NEXT: v_cndmask_b32_e64 v11, v11, 0x7ff80000, s9
+; GFX10-NEXT: v_min_f64 v[16:17], v[14:15], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[14:15], v[0:1]
+; GFX10-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_min_f64 v[2:3], v[6:7], v[8:9]
+; GFX10-NEXT: v_cmp_u_f64_e64 s5, v[6:7], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s5
+; GFX10-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_min_f64 v[12:13], v[28:29], v[30:31]
+; GFX10-NEXT: v_cmp_u_f64_e64 s10, v[28:29], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v12, 0, s10
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v13, 0x7ff80000, s10
+; GFX10-NEXT: v_min_f64 v[4:5], v[10:11], v[12:13]
+; GFX10-NEXT: v_cmp_u_f64_e64 s6, v[10:11], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s6
+; GFX10-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX10-NEXT: v_cmp_u_f64_e64 s4, v[2:3], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s4
+; GFX10-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_min_f64 v[32:33], v[0:1], v[2:3]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_min_f64 v[0:1], v[4:5], v[6:7]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[4:5], v[6:7]
+; GFX11-NEXT: v_min_f64 v[2:3], v[8:9], v[10:11]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[8:9], v[10:11]
+; GFX11-NEXT: v_min_f64 v[4:5], v[12:13], v[14:15]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[12:13], v[14:15]
+; GFX11-NEXT: v_min_f64 v[6:7], v[16:17], v[18:19]
+; GFX11-NEXT: v_cmp_u_f64_e64 s3, v[16:17], v[18:19]
+; GFX11-NEXT: v_min_f64 v[8:9], v[20:21], v[22:23]
+; GFX11-NEXT: v_cmp_u_f64_e64 s4, v[20:21], v[22:23]
+; GFX11-NEXT: v_min_f64 v[10:11], v[24:25], v[26:27]
+; GFX11-NEXT: v_cmp_u_f64_e64 s5, v[24:25], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v32, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v33, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, 0, s3
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, 0x7ff80000, s3
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, 0, s4
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, 0x7ff80000, s4
+; GFX11-NEXT: v_cndmask_b32_e64 v10, v10, 0, s5
+; GFX11-NEXT: v_cndmask_b32_e64 v11, v11, 0x7ff80000, s5
+; GFX11-NEXT: v_min_f64 v[16:17], v[14:15], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[14:15], v[0:1]
+; GFX11-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_min_f64 v[2:3], v[6:7], v[8:9]
+; GFX11-NEXT: v_cmp_u_f64_e64 s1, v[6:7], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v16, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v17, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0x7ff80000, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f64 v[8:9], v[6:7], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[6:7], v[0:1]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_min_f64 v[12:13], v[28:29], v[30:31]
+; GFX11-NEXT: v_cmp_u_f64_e64 s6, v[28:29], v[30:31]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v12, 0, s6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v13, 0x7ff80000, s6
+; GFX11-NEXT: v_min_f64 v[4:5], v[10:11], v[12:13]
+; GFX11-NEXT: v_cmp_u_f64_e64 s2, v[10:11], v[12:13]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, 0, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 0x7ff80000, s2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_f64 v[0:1], v[2:3], v[4:5]
+; GFX11-NEXT: v_cmp_u_f64_e64 s0, v[2:3], v[4:5]
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v8, 0, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v9, 0x7ff80000, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0x7ff80000, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_f64 v[4:5], v[2:3], v[0:1]
+; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, 0x7ff80000, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fminimum_v16double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: v_minimum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_minimum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_minimum_f64 v[8:9], v[16:17], v[18:19]
+; GFX12-NEXT: v_minimum_f64 v[10:11], v[20:21], v[22:23]
+; GFX12-NEXT: v_minimum_f64 v[12:13], v[24:25], v[26:27]
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_minimum_f64 v[4:5], v[8:9], v[10:11]
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[14:15], v[28:29], v[30:31]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[6:7], v[12:13], v[14:15]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[4:5], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+entry: ; checks above are autogenerated by update_llc_test_checks.py -- regenerate, do not hand-edit
+ %res = call double @llvm.vector.reduce.fminimum.v16double(<16 x double> %v) ; legalizer expands this to a pairwise min tree; cmp_u/cndmask with 0x7ff80000 hi-word implements the NaN-propagating fminimum semantics on pre-GFX12 targets
+ ret double %res
+}
+
+declare half @llvm.vector.reduce.fminimum.v2half(<2 x half>) ; f16 variants exercised above
+declare half @llvm.vector.reduce.fminimum.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fminimum.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fminimum.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fminimum.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fminimum.v2float(<2 x float>) ; f32 variants
+declare float @llvm.vector.reduce.fminimum.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fminimum.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fminimum.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fminimum.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fminimum.v2double(<2 x double>) ; f64 variants
+declare double @llvm.vector.reduce.fminimum.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fminimum.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fminimum.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fminimum.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmul.ll
new file mode 100644
index 0000000000000..91281d9dc2c3d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/fmul.ll
@@ -0,0 +1,1461 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define half @test_vector_reduce_fmul_v2half(half %sp, <2 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v2half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v2half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v2half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v2half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v2half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v2half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry: ; checks above are autogenerated by update_llc_test_checks.py -- regenerate, do not hand-edit
+ %res = call half @llvm.vector.reduce.fmul.v2half(half %sp, <2 x half> %v) ; %sp is the start value; no reassoc flag, so lanes multiply strictly in order (sp*v0*v1), visible as the sequential v_mul chain
+ ret half %res
+}
+
+define half @test_vector_reduce_fmul_v3half(half %sp, <3 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v3half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v3half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v3half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v3half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v3half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v3half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry: ; checks above are autogenerated by update_llc_test_checks.py -- regenerate, do not hand-edit
+ %res = call half @llvm.vector.reduce.fmul.v3half(half %sp, <3 x half> %v) ; %sp is the start value; no reassoc flag, so the three lanes are multiplied strictly in order
+ ret half %res
+}
+
+; Ordered fmul reduction of <4 x half> seeded with %sp, scalarized to a
+; 4-multiply chain. GFX7 lacks f16 arithmetic, so each step converts f16->f32,
+; multiplies, and converts back; GFX8-GFX10 reach the high half of each packed
+; 32-bit register via SDWA (src1_sel:WORD_1); GFX11/GFX12 have no SDWA and
+; extract high halves with v_lshrrev_b32 instead.
+define half @test_vector_reduce_fmul_v4half(half %sp, <4 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v4half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v4half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v4half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v4half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v4half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v4half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmul.v4half(half %sp, <4 x half> %v)
+ ret half %res
+}
+
+; Ordered fmul reduction of <8 x half> seeded with %sp (8-multiply chain over
+; four packed 32-bit registers v1-v4). GFX7 promotes every f16 multiply to f32;
+; GFX8-GFX10 pair a low-half v_mul_f16_e32 with an SDWA multiply selecting
+; WORD_1; GFX11/GFX12 shift out the high halves with v_lshrrev_b32.
+define half @test_vector_reduce_fmul_v8half(half %sp, <8 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v8half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v8half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v8half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v8half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v8half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v8half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmul.v8half(half %sp, <8 x half> %v)
+ ret half %res
+}
+
+; Ordered fmul reduction of <16 x half> seeded with %sp (16-multiply chain
+; over eight packed 32-bit registers v1-v8). Same per-target lowering pattern
+; as the smaller half cases: f32 promotion on GFX7, SDWA WORD_1 selects on
+; GFX8-GFX10, v_lshrrev_b32 high-half extraction on GFX11/GFX12.
+define half @test_vector_reduce_fmul_v16half(half %sp, <16 x half> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v16half:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v6
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v7
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v9
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v10
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v11
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v12
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v13
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v14
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v15
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v16
+; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v16half:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v5
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v6
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v7
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_f16_e32 v0, v0, v8
+; GFX8-NEXT: v_mul_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v16half:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v5
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v6
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v7
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_f16_e32 v0, v0, v8
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v16half:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v5
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v6
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v7
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mul_f16_e32 v0, v0, v8
+; GFX10-NEXT: v_mul_f16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v16half:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v6
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v7
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v16half:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v2
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v4
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v6
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v7
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v8
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call half @llvm.vector.reduce.fmul.v16half(half %sp, <16 x half> %v)
+ ret half %res
+}
+
+; Ordered fmul reduction of <2 x float> seeded with %sp: two chained
+; v_mul_f32 instructions on every target (f32 needs no promotion or
+; packing), differing only in wait/s_delay_alu bookkeeping.
+define float @test_vector_reduce_fmul_v2float(float %sp, <2 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v2float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v2float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v2float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v2float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v2float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v2float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmul.v2float(float %sp, <2 x float> %v)
+ ret float %res
+}
+
+; Ordered fmul reduction of <3 x float> seeded with %sp: a linear chain of
+; three v_mul_f32 instructions on all targets.
+define float @test_vector_reduce_fmul_v3float(float %sp, <3 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v3float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v3float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v3float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v3float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v3float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v3float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmul.v3float(float %sp, <3 x float> %v)
+ ret float %res
+}
+
+; Ordered fmul reduction of <4 x float> seeded with %sp: a linear chain of
+; four v_mul_f32 instructions on all targets.
+define float @test_vector_reduce_fmul_v4float(float %sp, <4 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v4float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v4float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v4float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v4float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v4float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v4float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmul.v4float(float %sp, <4 x float> %v)
+ ret float %res
+}
+
+; Ordered fmul reduction of <8 x float> seeded with %sp: a linear chain of
+; eight v_mul_f32 instructions on all targets.
+define float @test_vector_reduce_fmul_v8float(float %sp, <8 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v8float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v8float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v8float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v8float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v8float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v8float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmul.v8float(float %sp, <8 x float> %v)
+ ret float %res
+}
+
+define float @test_vector_reduce_fmul_v16float(float %sp, <16 x float> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v16float:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX7-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v16float:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX8-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v16float:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v16float:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX10-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v16float:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v16float:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v6
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v8
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v9
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v10
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v12
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v14
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call float @llvm.vector.reduce.fmul.v16float(float %sp, <16 x float> %v)
+ ret float %res
+}
+
+
+define double @test_vector_reduce_fmul_v2double(double %sp, <2 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v2double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v2double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v2double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v2double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v2double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v2double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmul.v2double(double %sp, <2 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmul_v3double(double %sp, <3 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v3double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v3double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v3double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v3double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v3double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v3double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmul.v3double(double %sp, <3 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmul_v4double(double %sp, <4 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v4double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v4double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v4double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v4double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v4double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v4double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmul.v4double(double %sp, <4 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmul_v8double(double %sp, <8 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v8double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v8double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v8double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v8double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v8double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v8double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[12:13]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[14:15]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[16:17]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmul.v8double(double %sp, <8 x double> %v)
+ ret double %res
+}
+
+define double @test_vector_reduce_fmul_v16double(double %sp, <16 x double> %v) {
+; GFX7-LABEL: test_vector_reduce_fmul_v16double:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX7-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[18:19]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[20:21]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[22:23]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[24:25]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[26:27]
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[28:29]
+; GFX7-NEXT: s_waitcnt vmcnt(2)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[30:31]
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_fmul_v16double:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX8-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[18:19]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[20:21]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[22:23]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[24:25]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[26:27]
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[28:29]
+; GFX8-NEXT: s_waitcnt vmcnt(2)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[30:31]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_fmul_v16double:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: scratch_load_dword v32, off, s32 offset:4
+; GFX9-NEXT: scratch_load_dword v33, off, s32 offset:8
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[18:19]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[20:21]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[22:23]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[24:25]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[26:27]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[28:29]
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[30:31]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[32:33]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_fmul_v16double:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_clause 0x1
+; GFX10-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:4
+; GFX10-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[18:19]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[20:21]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[22:23]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[24:25]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[26:27]
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[28:29]
+; GFX10-NEXT: s_waitcnt vmcnt(2)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[30:31]
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_fmul_v16double:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v2, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v3, off, s32 offset:8
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[6:7]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[8:9]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[10:11]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[12:13]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[14:15]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[16:17]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[18:19]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[20:21]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[22:23]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[24:25]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[26:27]
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[28:29]
+; GFX11-NEXT: s_waitcnt vmcnt(2)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[30:31]
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_fmul_v16double:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: scratch_load_b32 v2, off, s32 offset:4
+; GFX12-NEXT: scratch_load_b32 v3, off, s32 offset:8
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[6:7]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[10:11]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[12:13]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[14:15]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[16:17]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[18:19]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[20:21]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[22:23]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[24:25]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[26:27]
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[28:29]
+; GFX12-NEXT: s_wait_loadcnt 0x2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[30:31]
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mul_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call double @llvm.vector.reduce.fmul.v16double(double %sp, <16 x double> %v)
+ ret double %res
+}
+
+declare half @llvm.vector.reduce.fmul.v2half(half, <2 x half>)
+declare half @llvm.vector.reduce.fmul.v3half(half, <3 x half>)
+declare half @llvm.vector.reduce.fmul.v4half(half, <4 x half>)
+declare half @llvm.vector.reduce.fmul.v8half(half, <8 x half>)
+declare half @llvm.vector.reduce.fmul.v16half(half, <16 x half>)
+declare float @llvm.vector.reduce.fmul.v2float(float, <2 x float>)
+declare float @llvm.vector.reduce.fmul.v3float(float, <3 x float>)
+declare float @llvm.vector.reduce.fmul.v4float(float, <4 x float>)
+declare float @llvm.vector.reduce.fmul.v8float(float, <8 x float>)
+declare float @llvm.vector.reduce.fmul.v16float(float, <16 x float>)
+declare double @llvm.vector.reduce.fmul.v2double(double, <2 x double>)
+declare double @llvm.vector.reduce.fmul.v3double(double, <3 x double>)
+declare double @llvm.vector.reduce.fmul.v4double(double, <4 x double>)
+declare double @llvm.vector.reduce.fmul.v8double(double, <8 x double>)
+declare double @llvm.vector.reduce.fmul.v16double(double, <16 x double>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/mul.ll
new file mode 100644
index 0000000000000..3e1951e801731
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/mul.ll
@@ -0,0 +1,2711 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_mul_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_mul_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.mul.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_mul_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v6, v2, v3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_mul_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_mul_lo_u16_e32 v1, v1, v5
+; GFX8-NEXT: v_mul_lo_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_lo_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_mul_lo_u16_e32 v1, v1, v5
+; GFX9-NEXT: v_mul_lo_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_lo_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX10-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX10-NEXT: v_mul_lo_u16 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_mul_lo_u16 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_mul_lo_u16 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_mul_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX7-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_mul_lo_u16_e32 v5, v5, v11
+; GFX8-NEXT: v_mul_lo_u16_e32 v4, v4, v10
+; GFX8-NEXT: v_mul_lo_u16_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_mul_lo_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_mul_lo_u16_e32 v1, v1, v9
+; GFX8-NEXT: v_mul_lo_u16_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v8
+; GFX8-NEXT: v_mul_lo_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_mul_lo_u16_e32 v1, v1, v5
+; GFX8-NEXT: v_mul_lo_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v4
+; GFX8-NEXT: v_mul_lo_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_mul_lo_u16_e32 v5, v5, v11
+; GFX9-NEXT: v_mul_lo_u16_e32 v4, v4, v10
+; GFX9-NEXT: v_mul_lo_u16_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_lo_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_mul_lo_u16_e32 v1, v1, v9
+; GFX9-NEXT: v_mul_lo_u16_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_lo_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v8
+; GFX9-NEXT: v_mul_lo_u16_e32 v1, v1, v5
+; GFX9-NEXT: v_mul_lo_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_lo_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v13, 16, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX10-NEXT: v_mul_lo_u16 v4, v4, v12
+; GFX10-NEXT: v_mul_lo_u16 v5, v5, v10
+; GFX10-NEXT: v_mul_lo_u16 v6, v6, v13
+; GFX10-NEXT: v_mul_lo_u16 v7, v7, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v8
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v8
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v7
+; GFX10-NEXT: v_mul_lo_u16 v2, v2, v9
+; GFX10-NEXT: v_mul_lo_u16 v3, v3, v10
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX10-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX10-NEXT: v_mul_lo_u16 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX11-NEXT: v_mul_lo_u16 v4, v4, v12
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u16 v5, v5, v13
+; GFX11-NEXT: v_mul_lo_u16 v6, v6, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u16 v7, v7, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v8
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u16 v3, v3, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_mul_lo_u16 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX12-NEXT: v_mul_lo_u16 v4, v4, v12
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u16 v5, v5, v13
+; GFX12-NEXT: v_mul_lo_u16 v6, v6, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u16 v7, v7, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v8
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 24, v8
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u16 v3, v3, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v4
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_mul_lo_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_mul_lo_u16 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_lo_u16 v1, v1, v3
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_mul_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_mul_lo_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.mul.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v6
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_e32 v4, v0, v2
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v2, v1, v3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v2, v4, v1
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_mul_lo_u32 v5, v5, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_mul_lo_u32 v4, v4, v10
+; GFX7-NEXT: v_mul_lo_u32 v6, v6, v11
+; GFX7-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v13
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v12
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v6
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u16_e32 v8, v0, v4
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v4, v1, v5
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v5, v2, v6
+; GFX8-NEXT: v_mul_lo_u16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_mul_lo_u16_e32 v5, v3, v7
+; GFX8-NEXT: v_mul_lo_u16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_mul_lo_u16_e32 v5, v8, v2
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mul_lo_u16_e32 v2, v4, v3
+; GFX8-NEXT: v_mul_lo_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_mul_lo_u16_e32 v2, v5, v1
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v4
+; GFX9-NEXT: v_pk_mul_lo_u16 v1, v1, v5
+; GFX9-NEXT: v_pk_mul_lo_u16 v2, v2, v6
+; GFX9-NEXT: v_pk_mul_lo_u16 v3, v3, v7
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v4
+; GFX10-NEXT: v_pk_mul_lo_u16 v1, v1, v5
+; GFX10-NEXT: v_pk_mul_lo_u16 v2, v2, v6
+; GFX10-NEXT: v_pk_mul_lo_u16 v3, v3, v7
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v4
+; GFX11-NEXT: v_pk_mul_lo_u16 v1, v1, v5
+; GFX11-NEXT: v_pk_mul_lo_u16 v2, v2, v6
+; GFX11-NEXT: v_pk_mul_lo_u16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v4
+; GFX12-NEXT: v_pk_mul_lo_u16 v1, v1, v5
+; GFX12-NEXT: v_pk_mul_lo_u16 v2, v2, v6
+; GFX12-NEXT: v_pk_mul_lo_u16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_mul_lo_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+define i32 @test_vector_reduce_mul_v2i32(<2 x i32> %v) {
+; Mul-reduce of <2 x i32>: expected to fold to a single v_mul_lo_u32 on every target.
+; GFX7-LABEL: test_vector_reduce_mul_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_mul_v3i32(<3 x i32> %v) {
+; Mul-reduce of <3 x i32> (non-power-of-2 lane count): two chained v_mul_lo_u32.
+; GFX7-LABEL: test_vector_reduce_mul_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.mul.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_mul_v4i32(<4 x i32> %v) {
+; Mul-reduce of <4 x i32>: tree reduction (two parallel muls, then one combining mul).
+; GFX7-LABEL: test_vector_reduce_mul_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_mul_v8i32(<8 x i32> %v) {
+; Mul-reduce of <8 x i32>: log2 tree of v_mul_lo_u32 (4 + 2 + 1 multiplies).
+; GFX7-LABEL: test_vector_reduce_mul_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX8-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX8-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX9-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX9-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX10-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX10-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX11-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX11-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX12-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX12-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_mul_v16i32(<16 x i32> %v) {
+; Mul-reduce of <16 x i32>: log2 tree of v_mul_lo_u32 (8 + 4 + 2 + 1 multiplies).
+; GFX7-LABEL: test_vector_reduce_mul_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX7-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX7-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX7-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX7-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX7-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX7-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX7-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX7-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX8-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX8-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX8-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX8-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX8-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX8-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX8-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX8-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX8-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX8-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX9-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX9-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX9-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX9-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX9-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX9-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX9-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX9-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX10-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX10-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX10-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX10-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX10-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX10-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX10-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX10-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX10-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX11-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX11-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX11-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX11-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX11-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX11-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX11-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX11-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v8
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v9
+; GFX12-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX12-NEXT: v_mul_lo_u32 v3, v3, v11
+; GFX12-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX12-NEXT: v_mul_lo_u32 v5, v5, v13
+; GFX12-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX12-NEXT: v_mul_lo_u32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX12-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_mul_v2i64(<2 x i64> %v) {
+; Mul-reduce of <2 x i64>: 64-bit multiply is expanded per target into
+; v_mad_u64_u32 chains (and v_mul_lo/hi_u32 on GFX12).
+; GFX7-LABEL: test_vector_reduce_mul_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v2, 0
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v3, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v2, v[3:4]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v2, 0
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v3, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v2, v[3:4]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v4, v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v4, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v5, v2, v[6:7]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v4, v2, 0
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v4, v3, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v5, v2, v[3:4]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v4, v0 :: v_dual_mov_b32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v4, v5, 0
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v4, v3, v[1:2]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v5, v[7:8]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_hi_u32 v4, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v3, v[4:5]
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[3:4]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_mul_v3i64(<3 x i64> %v) {
+; Mul-reduce of <3 x i64> (non-power-of-2): two sequential 64-bit multiplies,
+; each expanded into v_mad_u64_u32 (plus v_mul_lo/hi_u32 on GFX12).
+; GFX7-LABEL: test_vector_reduce_mul_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v0, v2, 0
+; GFX7-NEXT: v_mov_b32_e32 v6, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, v8
+; GFX7-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v3, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v7, v4, 0
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v2, v[8:9]
+; GFX7-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v5, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v2, v4, v[5:6]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v0, v2, 0
+; GFX8-NEXT: v_mov_b32_e32 v6, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, v8
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v3, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v7, v4, 0
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v2, v[8:9]
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v5, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v2, v4, v[5:6]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v0, v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v0, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, v2, v[8:9]
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v6, v5, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v6, v4, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v7, v4, v[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v6, v0
+; GFX10-NEXT: v_mov_b32_e32 v7, v1
+; GFX10-NEXT: v_mad_u64_u32 v[8:9], s4, v6, v2, 0
+; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v6, v3, v[9:10]
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v8, v4, 0
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v7, v2, v[9:10]
+; GFX10-NEXT: v_mad_u64_u32 v[5:6], s4, v8, v5, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v2, v4, v[5:6]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v6, v2, 0
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v8, v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[10:11], null, v6, v3, v[9:10]
+; GFX11-NEXT: v_mad_u64_u32 v[11:12], null, v7, v2, v[10:11]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v8, v5, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v11, v4, v[6:7]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_hi_u32 v6, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v8, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, v0, v3, v[6:7]
+; GFX12-NEXT: v_mul_hi_u32 v0, v8, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[6:7]
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, v8, v5, v[0:1]
+; GFX12-NEXT: v_mul_lo_u32 v0, v8, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v4, v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.mul.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_mul_v4i64(<4 x i64> %v) {
+; Mul-reduce of <4 x i64>: tree of three 64-bit multiplies, each expanded
+; into v_mad_u64_u32 sequences (v_mul_lo/hi_u32 + v_mad_co_u64_u32 on GFX12).
+; GFX7-LABEL: test_vector_reduce_mul_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v0, v4, 0
+; GFX7-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v2, v6, 0
+; GFX7-NEXT: v_mov_b32_e32 v8, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, v10
+; GFX7-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v0, v5, v[1:2]
+; GFX7-NEXT: v_mov_b32_e32 v0, v12
+; GFX7-NEXT: v_mad_u64_u32 v[15:16], s[4:5], v2, v7, v[0:1]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v9, v11, 0
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, v6, v[15:16]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v8, v4, v[13:14]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v2, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v11, v[1:2]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v0, v4, 0
+; GFX8-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v2, v6, 0
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, v10
+; GFX8-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v0, v5, v[1:2]
+; GFX8-NEXT: v_mov_b32_e32 v0, v12
+; GFX8-NEXT: v_mad_u64_u32 v[15:16], s[4:5], v2, v7, v[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v9, v11, 0
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, v6, v[15:16]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v8, v4, v[13:14]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v2, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v11, v[1:2]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v0, v5, 0
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v0, v4, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, v4, v[10:11]
+; GFX9-NEXT: v_add_u32_e32 v9, v9, v0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, v7, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v2, v6, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v3, v6, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v2, v5, v0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v8, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v8, v4, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v9, v4, v[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mad_u64_u32 v[10:11], s4, v2, v6, 0
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mad_u64_u32 v[12:13], s4, v8, v4, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, v11
+; GFX10-NEXT: v_mad_u64_u32 v[14:15], s4, v2, v7, v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v2, v13
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v12, v10, 0
+; GFX10-NEXT: v_mad_u64_u32 v[7:8], s4, v8, v5, v[2:3]
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v3, v6, v[14:15]
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v9, v4, v[7:8]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v12, v2, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v3, v10, v[1:2]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v8, v0 :: v_dual_mov_b32 v9, v1
+; GFX11-NEXT: v_mad_u64_u32 v[10:11], null, v2, v6, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[12:13], null, v8, v4, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mad_u64_u32 v[14:15], null, v2, v7, v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v2, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v12, v10, 0
+; GFX11-NEXT: v_mad_u64_u32 v[15:16], null, v8, v5, v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v3, v6, v[14:15]
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v9, v4, v[15:16]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v12, v7, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v5, v10, v[3:4]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_hi_u32 v8, v2, v6
+; GFX12-NEXT: v_mul_hi_u32 v9, v0, v4
+; GFX12-NEXT: v_mul_lo_u32 v10, v0, v4
+; GFX12-NEXT: v_mul_lo_u32 v11, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[7:8], null, v2, v7, v[8:9]
+; GFX12-NEXT: v_mad_co_u64_u32 v[8:9], null, v0, v5, v[9:10]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mul_hi_u32 v0, v10, v11
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v6, v[7:8]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v4, v[8:9]
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v2, v[0:1]
+; GFX12-NEXT: v_mul_lo_u32 v0, v10, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v11, v[1:2]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_mul_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v4, v12, 0
+; GFX7-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v0, v8, 0
+; GFX7-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v4, v13, v[17:18]
+; GFX7-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v12, v[17:18]
+; GFX7-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v19, v16, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v13
+; GFX7-NEXT: v_mov_b32_e32 v13, v20
+; GFX7-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v0, v9, v[13:14]
+; GFX7-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v19, v4, v[5:6]
+; GFX7-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v6, v14, 0
+; GFX7-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v8, v[17:18]
+; GFX7-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v2, v10, 0
+; GFX7-NEXT: v_mov_b32_e32 v0, v20
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v15, v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v6, v18
+; GFX7-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v2, v11, v[6:7]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v7, v14, v[0:1]
+; GFX7-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v17, v19, 0
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, v10, v[20:21]
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v17, v0, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v6, 0
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, v19, v[9:10]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v8, v16, v[4:5]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v2, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v6, v[1:2]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v4, v12, 0
+; GFX8-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v0, v8, 0
+; GFX8-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v4, v13, v[17:18]
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v5, v12, v[17:18]
+; GFX8-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v19, v16, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v13
+; GFX8-NEXT: v_mov_b32_e32 v13, v20
+; GFX8-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v0, v9, v[13:14]
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v19, v4, v[5:6]
+; GFX8-NEXT: v_mad_u64_u32 v[19:20], s[4:5], v6, v14, 0
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v8, v[17:18]
+; GFX8-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v2, v10, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, v20
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v15, v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v6, v18
+; GFX8-NEXT: v_mad_u64_u32 v[20:21], s[4:5], v2, v11, v[6:7]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v7, v14, v[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v17, v19, 0
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v3, v10, v[20:21]
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v17, v0, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v6, 0
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, v19, v[9:10]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v8, v16, v[4:5]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v2, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v6, v[1:2]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v0, v9, 0
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v0, v8, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, v8, v[18:19]
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v2, v11, 0
+; GFX9-NEXT: v_add_u32_e32 v17, v17, v0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, v10, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, v10, v[8:9]
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v4, v13, 0
+; GFX9-NEXT: v_add_u32_e32 v10, v1, v2
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v12, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v5, v12, v[8:9]
+; GFX9-NEXT: v_add_u32_e32 v1, v3, v4
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v6, v15, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v6, v14, 0
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v7, v14, v[8:9]
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v16, v1, 0
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v6
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v16, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v17, v2, v[8:9]
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v2
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v0, v4, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, v5, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v10, v4, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v6, v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v6, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v7, v2, v[4:5]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mad_u64_u32 v[17:18], s4, v0, v8, 0
+; GFX10-NEXT: v_mad_u64_u32 v[19:20], s4, v2, v10, 0
+; GFX10-NEXT: v_mov_b32_e32 v16, v1
+; GFX10-NEXT: v_mad_u64_u32 v[21:22], s4, v6, v14, 0
+; GFX10-NEXT: v_mad_u64_u32 v[23:24], s4, v4, v12, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, v18
+; GFX10-NEXT: v_mov_b32_e32 v18, v20
+; GFX10-NEXT: v_mad_u64_u32 v[25:26], s4, v0, v9, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v2, v11, v[18:19]
+; GFX10-NEXT: v_mov_b32_e32 v0, v22
+; GFX10-NEXT: v_mov_b32_e32 v2, v24
+; GFX10-NEXT: v_mad_u64_u32 v[29:30], s4, v4, v13, v[2:3]
+; GFX10-NEXT: v_mad_u64_u32 v[26:27], s4, v6, v15, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[27:28], s4, v19, v21, 0
+; GFX10-NEXT: v_mad_u64_u32 v[30:31], s4, v17, v23, 0
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v3, v10, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v7, v14, v[26:27]
+; GFX10-NEXT: v_mov_b32_e32 v0, v28
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v5, v12, v[29:30]
+; GFX10-NEXT: v_mov_b32_e32 v4, v31
+; GFX10-NEXT: v_mad_u64_u32 v[5:6], s4, v19, v6, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v16, v8, v[25:26]
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v30, v27, 0
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v17, v3, v[4:5]
+; GFX10-NEXT: v_mad_u64_u32 v[4:5], s4, v2, v21, v[5:6]
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v6, v23, v[3:4]
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v30, v4, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v2, v27, v[3:4]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[17:18], null, v0, v8, 0
+; GFX11-NEXT: v_mad_u64_u32 v[19:20], null, v2, v10, 0
+; GFX11-NEXT: v_mad_u64_u32 v[21:22], null, v6, v14, 0
+; GFX11-NEXT: v_mad_u64_u32 v[23:24], null, v4, v12, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mov_b32 v16, v1 :: v_dual_mov_b32 v1, v18
+; GFX11-NEXT: v_mov_b32_e32 v18, v20
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mad_u64_u32 v[25:26], null, v0, v9, v[1:2]
+; GFX11-NEXT: v_dual_mov_b32 v0, v22 :: v_dual_mov_b32 v1, v24
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[26:27], null, v2, v11, v[18:19]
+; GFX11-NEXT: v_mad_u64_u32 v[27:28], null, v6, v15, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[30:31], null, v4, v13, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[28:29], null, v19, v21, 0
+; GFX11-NEXT: v_mad_u64_u32 v[31:32], null, v17, v23, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v7, v14, v[27:28]
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v3, v10, v[26:27]
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v12, v[30:31]
+; GFX11-NEXT: v_mov_b32_e32 v1, v29
+; GFX11-NEXT: v_mov_b32_e32 v3, v32
+; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v16, v8, v[25:26]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v19, v0, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v31, v28, 0
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v17, v2, v[3:4]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v6, v21, v[4:5]
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v9, v23, v[7:8]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v31, v2, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v3, v28, v[4:5]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mul_hi_u32 v16, v0, v8
+; GFX12-NEXT: v_mul_hi_u32 v17, v6, v14
+; GFX12-NEXT: v_mul_lo_u32 v21, v0, v8
+; GFX12-NEXT: v_mul_lo_u32 v22, v2, v10
+; GFX12-NEXT: v_mul_lo_u32 v23, v6, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], null, v0, v9, v[16:17]
+; GFX12-NEXT: v_mul_hi_u32 v9, v2, v10
+; GFX12-NEXT: v_mul_hi_u32 v0, v4, v12
+; GFX12-NEXT: v_mad_co_u64_u32 v[15:16], null, v6, v15, v[17:18]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[16:17], null, v2, v11, v[9:10]
+; GFX12-NEXT: v_mad_co_u64_u32 v[19:20], null, v4, v13, v[0:1]
+; GFX12-NEXT: v_mul_lo_u32 v9, v4, v12
+; GFX12-NEXT: v_mul_hi_u32 v0, v22, v23
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, v7, v14, v[15:16]
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v10, v[16:17]
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v5, v12, v[19:20]
+; GFX12-NEXT: v_mul_hi_u32 v4, v21, v9
+; GFX12-NEXT: v_mul_lo_u32 v7, v21, v9
+; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], null, v22, v6, v[0:1]
+; GFX12-NEXT: v_mul_lo_u32 v6, v22, v23
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v1, v8, v[18:19]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v21, v3, v[4:5]
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v2, v23, v[5:6]
+; GFX12-NEXT: v_mul_hi_u32 v2, v7, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v9, v[3:4]
+; GFX12-NEXT: v_mul_lo_u32 v0, v7, v6
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v7, v1, v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v6, v[1:2]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_mul_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_mul_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[31:32], s[4:5], v8, v24, 0
+; GFX7-NEXT: v_mad_u64_u32 v[32:33], s[4:5], v8, v25, v[32:33]
+; GFX7-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v9, v24, v[32:33]
+; GFX7-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v0, v16, 0
+; GFX7-NEXT: v_mad_u64_u32 v[32:33], s[4:5], v24, v31, 0
+; GFX7-NEXT: v_mov_b32_e32 v9, v33
+; GFX7-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v24, v8, v[9:10]
+; GFX7-NEXT: v_mov_b32_e32 v24, v25
+; GFX7-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v0, v17, v[24:25]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, v16, v[24:25]
+; GFX7-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v2, v18, 0
+; GFX7-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v31, v[8:9]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v10, v26, 0
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v10, v27, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v11, v26, v[9:10]
+; GFX7-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v16, v0, 0
+; GFX7-NEXT: v_mov_b32_e32 v1, v11
+; GFX7-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v16, v9, v[1:2]
+; GFX7-NEXT: buffer_load_dword v9, off, s[0:3], s32
+; GFX7-NEXT: v_mov_b32_e32 v1, v17
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v2, v19, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v18, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v12, v28, 0
+; GFX7-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v1, v0, v[24:25]
+; GFX7-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v4, v20, 0
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v29, v[0:1]
+; GFX7-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v17, v2, 0
+; GFX7-NEXT: v_mov_b32_e32 v3, v18
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v28, v[0:1]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v21, v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v1, v12
+; GFX7-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v14, v30, 0
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v20, v[3:4]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v17, v0, v[1:2]
+; GFX7-NEXT: v_mov_b32_e32 v4, v13
+; GFX7-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v6, v22, 0
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v2, v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, v18
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v9, v[4:5]
+; GFX7-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v17, v12, 0
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v15, v30, v[4:5]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v23, v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v2, v14
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v17, v1, v[2:3]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v7, v22, v[3:4]
+; GFX7-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v32, v11, 0
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v12, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v13, 0
+; GFX7-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v32, v0, v[5:6]
+; GFX7-NEXT: v_mov_b32_e32 v0, v3
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v10, v1, v[0:1]
+; GFX7-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v2, 0
+; GFX7-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v16, v13, v[9:10]
+; GFX7-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v8, v11, v[5:6]
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v9, v[1:2]
+; GFX7-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v2, v[3:4]
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_mul_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[31:32], s[4:5], v8, v24, 0
+; GFX8-NEXT: v_mad_u64_u32 v[32:33], s[4:5], v8, v25, v[32:33]
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v9, v24, v[32:33]
+; GFX8-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v0, v16, 0
+; GFX8-NEXT: v_mad_u64_u32 v[32:33], s[4:5], v24, v31, 0
+; GFX8-NEXT: v_mov_b32_e32 v9, v33
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v24, v8, v[9:10]
+; GFX8-NEXT: v_mov_b32_e32 v24, v25
+; GFX8-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v0, v17, v[24:25]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, v16, v[24:25]
+; GFX8-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v2, v18, 0
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v31, v[8:9]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v10, v26, 0
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v10, v27, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v11, v26, v[9:10]
+; GFX8-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v16, v0, 0
+; GFX8-NEXT: v_mov_b32_e32 v1, v11
+; GFX8-NEXT: v_mad_u64_u32 v[24:25], s[4:5], v16, v9, v[1:2]
+; GFX8-NEXT: buffer_load_dword v9, off, s[0:3], s32
+; GFX8-NEXT: v_mov_b32_e32 v1, v17
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v2, v19, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v18, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v12, v28, 0
+; GFX8-NEXT: v_mad_u64_u32 v[16:17], s[4:5], v1, v0, v[24:25]
+; GFX8-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v4, v20, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v29, v[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[11:12], s[4:5], v17, v2, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, v18
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v28, v[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v21, v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v1, v12
+; GFX8-NEXT: v_mad_u64_u32 v[12:13], s[4:5], v14, v30, 0
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v20, v[3:4]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v17, v0, v[1:2]
+; GFX8-NEXT: v_mov_b32_e32 v4, v13
+; GFX8-NEXT: v_mad_u64_u32 v[17:18], s[4:5], v6, v22, 0
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v2, v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, v18
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v14, v9, v[4:5]
+; GFX8-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v17, v12, 0
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v15, v30, v[4:5]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v23, v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v2, v14
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v17, v1, v[2:3]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v7, v22, v[3:4]
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v32, v11, 0
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v3, v12, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v13, 0
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v32, v0, v[5:6]
+; GFX8-NEXT: v_mov_b32_e32 v0, v3
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v10, v1, v[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v2, 0
+; GFX8-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v16, v13, v[9:10]
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v8, v11, v[5:6]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v9, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v2, v[3:4]
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_mul_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_mad_u64_u32 v[34:35], s[0:1], v0, v17, 0
+; GFX9-NEXT: v_mad_u64_u32 v[32:33], s[0:1], v0, v16, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, v16, v[34:35]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v2, v19, 0
+; GFX9-NEXT: v_add_u32_e32 v33, v33, v0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, v18, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v3, v18, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v4, v21, 0
+; GFX9-NEXT: v_add_u32_e32 v18, v1, v2
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v20, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v5, v20, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v6, v23, 0
+; GFX9-NEXT: v_add_u32_e32 v19, v3, v4
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v6, v22, 0
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v7, v22, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v8, v25, 0
+; GFX9-NEXT: v_add_u32_e32 v20, v5, v6
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v8, v24, 0
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v9, v24, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v10, v27, 0
+; GFX9-NEXT: v_add_u32_e32 v1, v7, v8
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v10, v26, 0
+; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v11, v26, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v12, v29, 0
+; GFX9-NEXT: v_add_u32_e32 v3, v9, v10
+; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v12, v28, 0
+; GFX9-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v13, v28, v[16:17]
+; GFX9-NEXT: v_add_u32_e32 v5, v11, v12
+; GFX9-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v14, v30, 0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v14, v31, 0
+; GFX9-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v15, v30, v[16:17]
+; GFX9-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v32, v1, 0
+; GFX9-NEXT: v_add_u32_e32 v9, v13, v14
+; GFX9-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v32, v6, 0
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v33, v6, v[16:17]
+; GFX9-NEXT: v_add_u32_e32 v11, v15, v6
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[0:1], v0, v8, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v18, v8, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, v10, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v2, v5, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v19, v10, v[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v12, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v4, v9, 0
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v20, v12, v[4:5]
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v14, v1, 0
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v14, v0, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v11, v0, v[8:9]
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v6, v3, 0
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v6, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v7, v2, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v2, v9, v0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v2, 0
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v4, v8, 0
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v5, v8, v[2:3]
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_mul_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mad_u64_u32 v[31:32], s4, v0, v16, 0
+; GFX10-NEXT: v_mad_u64_u32 v[33:34], s4, v2, v18, 0
+; GFX10-NEXT: v_mad_u64_u32 v[38:39], s4, v6, v22, 0
+; GFX10-NEXT: v_mad_u64_u32 v[35:36], s4, v0, v17, v[32:33]
+; GFX10-NEXT: v_mad_u64_u32 v[36:37], s4, v4, v20, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, v34
+; GFX10-NEXT: v_mad_u64_u32 v[16:17], s4, v1, v16, v[35:36]
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v2, v19, v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v1, v37
+; GFX10-NEXT: v_mov_b32_e32 v2, v39
+; GFX10-NEXT: v_mad_u64_u32 v[34:35], s4, v8, v24, 0
+; GFX10-NEXT: v_mad_u64_u32 v[48:49], s4, v4, v21, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[49:50], s4, v10, v26, 0
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v6, v23, v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, v35
+; GFX10-NEXT: v_mad_u64_u32 v[5:6], s4, v5, v20, v[48:49]
+; GFX10-NEXT: v_mad_u64_u32 v[51:52], s4, v8, v25, v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, v50
+; GFX10-NEXT: v_mad_u64_u32 v[52:53], s4, v10, v27, v[2:3]
+; GFX10-NEXT: v_mad_u64_u32 v[53:54], s4, v12, v28, 0
+; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v9, v24, v[51:52]
+; GFX10-NEXT: v_mov_b32_e32 v2, v54
+; GFX10-NEXT: v_mad_u64_u32 v[10:11], s4, v11, v26, v[52:53]
+; GFX10-NEXT: v_mad_u64_u32 v[19:20], s4, v36, v53, 0
+; GFX10-NEXT: v_mad_u64_u32 v[54:55], s4, v12, v29, v[2:3]
+; GFX10-NEXT: buffer_load_dword v12, off, s[0:3], s32
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v3, v18, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v14, v30, 0
+; GFX10-NEXT: v_mad_u64_u32 v[17:18], s4, v33, v49, 0
+; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v7, v22, v[1:2]
+; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mad_u64_u32 v[7:8], s4, v31, v34, 0
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v14, v12, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[11:12], s4, v38, v3, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, v8
+; GFX10-NEXT: v_mad_u64_u32 v[13:14], s4, v13, v28, v[54:55]
+; GFX10-NEXT: v_mad_u64_u32 v[14:15], s4, v15, v30, v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v18
+; GFX10-NEXT: v_mad_u64_u32 v[8:9], s4, v31, v9, v[1:2]
+; GFX10-NEXT: v_mov_b32_e32 v1, v12
+; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v33, v10, v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v20
+; GFX10-NEXT: v_mad_u64_u32 v[14:15], s4, v38, v14, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[20:21], s4, v17, v11, 0
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v36, v13, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[12:13], s4, v7, v19, 0
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v6, v3, v[14:15]
+; GFX10-NEXT: v_mov_b32_e32 v1, v21
+; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v2, v49, v[9:10]
+; GFX10-NEXT: v_mov_b32_e32 v2, v13
+; GFX10-NEXT: v_mad_u64_u32 v[4:5], s4, v5, v53, v[0:1]
+; GFX10-NEXT: v_mad_u64_u32 v[5:6], s4, v17, v3, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v12, v20, 0
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s4, v7, v4, v[2:3]
+; GFX10-NEXT: v_mad_u64_u32 v[13:14], s4, v16, v34, v[8:9]
+; GFX10-NEXT: v_mad_u64_u32 v[3:4], s4, v9, v11, v[5:6]
+; GFX10-NEXT: v_mad_u64_u32 v[4:5], s4, v13, v19, v[2:3]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v12, v3, v[1:2]
+; GFX10-NEXT: v_mad_u64_u32 v[1:2], s4, v4, v20, v[1:2]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_mul_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v71, off, s32
+; GFX11-NEXT: v_mad_u64_u32 v[31:32], null, v0, v16, 0
+; GFX11-NEXT: v_mad_u64_u32 v[33:34], null, v2, v18, 0
+; GFX11-NEXT: v_mad_u64_u32 v[35:36], null, v4, v20, 0
+; GFX11-NEXT: v_mad_u64_u32 v[37:38], null, v6, v22, 0
+; GFX11-NEXT: v_mad_u64_u32 v[50:51], null, v10, v26, 0
+; GFX11-NEXT: v_mad_u64_u32 v[52:53], null, v12, v28, 0
+; GFX11-NEXT: v_mad_u64_u32 v[48:49], null, v8, v24, 0
+; GFX11-NEXT: v_mad_u64_u32 v[54:55], null, v14, v30, 0
+; GFX11-NEXT: v_mad_u64_u32 v[82:83], null, v0, v17, v[32:33]
+; GFX11-NEXT: v_mad_u64_u32 v[83:84], null, v2, v19, v[34:35]
+; GFX11-NEXT: v_mad_u64_u32 v[84:85], null, v4, v21, v[36:37]
+; GFX11-NEXT: v_mad_u64_u32 v[85:86], null, v6, v23, v[38:39]
+; GFX11-NEXT: v_mad_u64_u32 v[86:87], null, v10, v27, v[51:52]
+; GFX11-NEXT: v_mad_u64_u32 v[65:66], null, v31, v48, 0
+; GFX11-NEXT: v_mad_u64_u32 v[38:39], null, v8, v25, v[49:50]
+; GFX11-NEXT: v_mov_b32_e32 v64, v55
+; GFX11-NEXT: v_mad_u64_u32 v[96:97], null, v12, v29, v[53:54]
+; GFX11-NEXT: v_mad_u64_u32 v[97:98], null, v1, v16, v[82:83]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v3, v18, v[83:84]
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v20, v[84:85]
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v7, v22, v[85:86]
+; GFX11-NEXT: v_mad_u64_u32 v[67:68], null, v33, v50, 0
+; GFX11-NEXT: v_mad_u64_u32 v[80:81], null, v37, v54, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, v66
+; GFX11-NEXT: v_mad_u64_u32 v[69:70], null, v35, v52, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v14, v71, v[64:65]
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v9, v24, v[38:39]
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v11, v26, v[86:87]
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v13, v28, v[96:97]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v15, v30, v[4:5]
+; GFX11-NEXT: v_mov_b32_e32 v4, v68
+; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v31, v5, v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v81
+; GFX11-NEXT: v_mad_u64_u32 v[10:11], null, v33, v6, v[4:5]
+; GFX11-NEXT: v_mov_b32_e32 v4, v70
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v37, v8, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[11:12], null, v67, v80, 0
+; GFX11-NEXT: v_mad_u64_u32 v[13:14], null, v35, v7, v[4:5]
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v65, v69, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mov_b32_e32 v0, v12
+; GFX11-NEXT: v_mad_u64_u32 v[14:15], null, v3, v54, v[5:6]
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v1, v50, v[10:11]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v2, v52, v[13:14]
+; GFX11-NEXT: v_mov_b32_e32 v2, v7
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v67, v14, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[12:13], null, v97, v48, v[9:10]
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v6, v11, 0
+; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v65, v4, v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v3, v80, v[7:8]
+; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v12, v69, v[8:9]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v6, v4, v[1:2]
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v9, v11, v[7:8]
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_mul_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v39, off, s32
+; GFX12-NEXT: v_mul_hi_u32 v31, v0, v16
+; GFX12-NEXT: v_mul_hi_u32 v32, v2, v18
+; GFX12-NEXT: v_mul_hi_u32 v33, v4, v20
+; GFX12-NEXT: v_mul_hi_u32 v34, v6, v22
+; GFX12-NEXT: v_mul_hi_u32 v35, v8, v24
+; GFX12-NEXT: v_mul_hi_u32 v36, v10, v26
+; GFX12-NEXT: v_mul_hi_u32 v37, v12, v28
+; GFX12-NEXT: v_mul_hi_u32 v38, v14, v30
+; GFX12-NEXT: v_mul_lo_u32 v51, v2, v18
+; GFX12-NEXT: v_mul_lo_u32 v53, v6, v22
+; GFX12-NEXT: v_mul_lo_u32 v55, v10, v26
+; GFX12-NEXT: v_mul_lo_u32 v65, v14, v30
+; GFX12-NEXT: v_mad_co_u64_u32 v[48:49], null, v0, v17, v[31:32]
+; GFX12-NEXT: v_mad_co_u64_u32 v[31:32], null, v2, v19, v[32:33]
+; GFX12-NEXT: v_mad_co_u64_u32 v[32:33], null, v4, v21, v[33:34]
+; GFX12-NEXT: v_mad_co_u64_u32 v[33:34], null, v6, v23, v[34:35]
+; GFX12-NEXT: v_mad_co_u64_u32 v[34:35], null, v8, v25, v[35:36]
+; GFX12-NEXT: v_mad_co_u64_u32 v[35:36], null, v10, v27, v[36:37]
+; GFX12-NEXT: v_mad_co_u64_u32 v[36:37], null, v12, v29, v[37:38]
+; GFX12-NEXT: v_mul_lo_u32 v50, v0, v16
+; GFX12-NEXT: v_mul_lo_u32 v52, v4, v20
+; GFX12-NEXT: v_mul_lo_u32 v54, v8, v24
+; GFX12-NEXT: v_mul_lo_u32 v64, v12, v28
+; GFX12-NEXT: v_mul_lo_u32 v23, v51, v55
+; GFX12-NEXT: v_mul_lo_u32 v27, v53, v65
+; GFX12-NEXT: v_mul_hi_u32 v6, v53, v65
+; GFX12-NEXT: v_mad_co_u64_u32 v[7:8], null, v7, v22, v[33:34]
+; GFX12-NEXT: v_mad_co_u64_u32 v[16:17], null, v1, v16, v[48:49]
+; GFX12-NEXT: v_mul_lo_u32 v21, v50, v54
+; GFX12-NEXT: v_mul_lo_u32 v25, v52, v64
+; GFX12-NEXT: v_mad_co_u64_u32 v[8:9], null, v9, v24, v[34:35]
+; GFX12-NEXT: v_mul_hi_u32 v2, v51, v55
+; GFX12-NEXT: v_mad_co_u64_u32 v[17:18], null, v3, v18, v[31:32]
+; GFX12-NEXT: v_mad_co_u64_u32 v[9:10], null, v11, v26, v[35:36]
+; GFX12-NEXT: v_mul_hi_u32 v3, v23, v27
+; GFX12-NEXT: v_mul_hi_u32 v4, v52, v64
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], null, v5, v20, v[32:33]
+; GFX12-NEXT: v_mul_hi_u32 v0, v50, v54
+; GFX12-NEXT: v_mul_hi_u32 v1, v21, v25
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[37:38], null, v14, v39, v[38:39]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[14:15], null, v15, v30, v[37:38]
+; GFX12-NEXT: v_mad_co_u64_u32 v[10:11], null, v13, v28, v[36:37]
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], null, v51, v9, v[2:3]
+; GFX12-NEXT: v_mul_lo_u32 v13, v21, v25
+; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], null, v53, v14, v[6:7]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[9:10], null, v52, v10, v[4:5]
+; GFX12-NEXT: v_mad_co_u64_u32 v[4:5], null, v7, v65, v[5:6]
+; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], null, v50, v8, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, v17, v55, v[11:12]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[7:8], null, v18, v64, v[9:10]
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, v23, v4, v[3:4]
+; GFX12-NEXT: v_mul_lo_u32 v8, v23, v27
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v16, v54, v[5:6]
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v21, v7, v[1:2]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v6, v27, v[2:3]
+; GFX12-NEXT: v_mul_hi_u32 v2, v13, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v25, v[0:1]
+; GFX12-NEXT: v_mul_lo_u32 v0, v13, v8
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v13, v1, v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v8, v[1:2]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.mul.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.mul.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.mul.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/or.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/or.ll
new file mode 100644
index 0000000000000..27e7d44192a3e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/or.ll
@@ -0,0 +1,2347 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_or_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_or_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.or.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_or_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff00, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v6, v2, v3
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xff0000
+; GFX9-NEXT: v_mov_b32_e32 v3, 0xff000000
+; GFX9-NEXT: v_or3_b32 v1, v1, v2, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v1
+; GFX9-NEXT: v_or3_b32 v0, v0, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX10-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_or_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff00, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX9-NEXT: v_or_b32_e32 v6, v4, v5
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_or3_b32 v0, v6, v7, v0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xff0000
+; GFX9-NEXT: v_mov_b32_e32 v3, 0xff000000
+; GFX9-NEXT: v_or3_b32 v1, v1, v2, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v1
+; GFX9-NEXT: v_or3_b32 v0, v0, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v7, v4, v5, v6
+; GFX10-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v7
+; GFX10-NEXT: v_or_b32_sdwa v2, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_or_b32_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_or3_b32 v0, v4, v6, v0
+; GFX10-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or3_b32 v5, v4, v6, v7
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 8, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v0, v4, v7, v0
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v2, v2, v9
+; GFX11-NEXT: v_or_b32_e32 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_or3_b32 v5, v4, v6, v7
+; GFX12-NEXT: v_or_b32_e32 v4, v4, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 8, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v0, v4, v7, v0
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or_b32_e32 v2, v2, v9
+; GFX12-NEXT: v_or_b32_e32 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_or_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_or_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX7-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff00, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v11
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v10
+; GFX8-NEXT: v_or_b32_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_or_b32_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff0000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0xff000000, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff0000, v0
+; GFX8-NEXT: v_or_b32_e32 v0, 0xff000000, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v12, v12, v16, v13
+; GFX9-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX9-NEXT: v_lshlrev_b32_e32 v15, 24, v15
+; GFX9-NEXT: v_or_b32_e32 v14, v12, v13
+; GFX9-NEXT: v_or3_b32 v12, v12, v13, v15
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX9-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or3_b32 v4, v14, v15, v4
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_or_b32_sdwa v7, v7, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX9-NEXT: v_or3_b32 v1, v1, v9, v5
+; GFX9-NEXT: v_or3_b32 v2, v2, v10, v6
+; GFX9-NEXT: v_or3_b32 v3, v3, v11, v7
+; GFX9-NEXT: v_or3_b32 v0, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xff0000
+; GFX9-NEXT: v_mov_b32_e32 v3, 0xff000000
+; GFX9-NEXT: v_or3_b32 v1, v1, v2, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX9-NEXT: v_and_or_b32 v0, v0, v16, v1
+; GFX9-NEXT: v_or3_b32 v0, v0, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX10-NEXT: v_or_b32_e32 v10, v12, v13
+; GFX10-NEXT: v_or3_b32 v15, v12, v13, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX10-NEXT: v_or3_b32 v4, v10, v14, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v17, 8, v15
+; GFX10-NEXT: v_or_b32_sdwa v6, v6, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_or_b32_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_or_b32_e32 v5, v5, v17
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v7, v8, v9, v10
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v4
+; GFX10-NEXT: v_or3_b32 v0, v0, v7, v4
+; GFX10-NEXT: v_or3_b32 v1, v1, v5, v9
+; GFX10-NEXT: v_or3_b32 v2, v2, v6, v10
+; GFX10-NEXT: v_or3_b32 v3, v3, v8, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_or3_b32 v15, v12, v13, v14
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v16, 8, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v17, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v15
+; GFX11-NEXT: v_or3_b32 v4, v12, v14, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v5, v5, v16
+; GFX11-NEXT: v_or_b32_e32 v6, v6, v17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v7, v7, v15
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_or3_b32 v5, v8, v10, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 24, v4
+; GFX11-NEXT: v_or3_b32 v0, v0, v5, v4
+; GFX11-NEXT: v_or3_b32 v1, v1, v6, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v2, v2, v8, v10
+; GFX11-NEXT: v_or3_b32 v3, v3, v9, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_or3_b32 v15, v12, v13, v14
+; GFX12-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v16, 8, v15
+; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v15
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 24, v15
+; GFX12-NEXT: v_or3_b32 v4, v12, v14, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or_b32_e32 v5, v5, v16
+; GFX12-NEXT: v_or_b32_e32 v6, v6, v17
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or_b32_e32 v7, v7, v15
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_or3_b32 v5, v8, v10, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 8, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v11, 24, v4
+; GFX12-NEXT: v_or3_b32 v0, v0, v5, v4
+; GFX12-NEXT: v_or3_b32 v1, v1, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v2, v2, v8, v10
+; GFX12-NEXT: v_or3_b32 v3, v3, v9, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_mov_b32_e32 v2, 0xff000000
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v1, 0xff0000, v1, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_mov_b32_e32 v1, 0xff00
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, 0xff0000, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+; OR-reduce a <3 x i16>. Elements land unpacked in separate VGPRs on GFX7
+; (two plain v_or), while GFX9+ fold the packed low/high halves plus the
+; third element into a single v_or3_b32. Check lines are autogenerated.
+define i16 @test_vector_reduce_or_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.or.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+; OR-reduce a <4 x i16>: the two packed <2 x i16> registers are OR'd
+; together, then the remaining high half is folded down (SDWA on GFX8-10,
+; shift+or on GFX7/11/12). Check lines are autogenerated.
+define i16 @test_vector_reduce_or_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+; OR-reduce an <8 x i16>: pairwise OR of the four packed registers, then
+; fold the final high half down. Check lines are autogenerated.
+; NOTE(review): the GFX9-GFX12 expectations contain an extra
+; `v_bfi_b32 v2, 0xffff, v1, v1` / `v_or_b32_e32 v1, s0/s4, v1` pair whose
+; result in v1 is dead (the return value is v0), and on GFX10-12 the SGPR
+; (s4/s0) is never written in the function - presumably an artifact of
+; legalizing the odd/poison lanes; worth confirming with the patch author
+; before these checks are locked in.
+define i16 @test_vector_reduce_or_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_mov_b32 s0, 0xffff
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_or_b32_e32 v1, s0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT: v_or_b32_e32 v1, s4, v1
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT: v_or_b32_e32 v1, s0, v1
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT: v_or_b32_e32 v1, s0, v1
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+; OR-reduction of <16 x i16> to i16. Check lines appear autogenerated
+; (update_llc_test_checks.py style) -- regenerate rather than hand-edit.
+; NOTE(review): the GFX9-GFX12 expectations below use an SGPR with no visible
+; def in this function (e.g. "v_or_b32_e32 v1, s0, v1" / "v_or_b32_e32 v1, s4, v1"),
+; which looks like a read of an undefined register being baked into the test;
+; confirm the GlobalISel output is actually correct before landing.
+define i16 @test_vector_reduce_or_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v16i16:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT:    v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT:    v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT:    v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT:    v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT:    v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT:    v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT:    v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT:    v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT:    v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT:    v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v5
+; GFX7-NEXT:    v_and_b32_e32 v3, 0xffff, v4
+; GFX7-NEXT:    v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT:    v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 16, v7
+; GFX7-NEXT:    v_and_b32_e32 v4, 0xffff, v6
+; GFX7-NEXT:    v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT:    v_and_b32_e32 v4, 0xffff, v8
+; GFX7-NEXT:    v_lshlrev_b32_e32 v5, 16, v12
+; GFX7-NEXT:    v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT:    v_and_b32_e32 v5, 0xffff, v9
+; GFX7-NEXT:    v_lshlrev_b32_e32 v6, 16, v13
+; GFX7-NEXT:    v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT:    v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT:    v_and_b32_e32 v6, 0xffff, v10
+; GFX7-NEXT:    v_lshlrev_b32_e32 v7, 16, v14
+; GFX7-NEXT:    v_or_b32_e32 v6, v6, v7
+; GFX7-NEXT:    v_and_b32_e32 v7, 0xffff, v11
+; GFX7-NEXT:    v_lshlrev_b32_e32 v8, 16, v15
+; GFX7-NEXT:    v_or_b32_e32 v7, v7, v8
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    v_lshrrev_b32_e32 v4, 16, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    v_lshrrev_b32_e32 v5, 16, v3
+; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX7-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v16i16:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 16, v4
+; GFX8-NEXT:    v_lshrrev_b32_e32 v9, 16, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
+; GFX8-NEXT:    v_lshrrev_b32_e32 v10, 16, v6
+; GFX8-NEXT:    v_or_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v9
+; GFX8-NEXT:    v_lshrrev_b32_e32 v11, 16, v7
+; GFX8-NEXT:    v_or_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v10
+; GFX8-NEXT:    v_or_b32_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v11
+; GFX8-NEXT:    v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT:    v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT:    v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v16i16:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v5, v1, v5
+; GFX9-NEXT:    v_or_b32_e32 v1, v3, v7
+; GFX9-NEXT:    s_mov_b32 s0, 0xffff
+; GFX9-NEXT:    v_or_b32_e32 v4, v0, v4
+; GFX9-NEXT:    v_or_b32_e32 v0, v2, v6
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v1
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v0
+; GFX9-NEXT:    v_or_b32_e32 v1, v5, v1
+; GFX9-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX9-NEXT:    v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX9-NEXT:    v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT:    v_or_b32_e32 v1, s0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v16i16:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX10-NEXT:    v_bfi_b32 v3, 0xffff, v3, v3
+; GFX10-NEXT:    v_bfi_b32 v2, 0xffff, v2, v2
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT:    v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT:    v_or_b32_e32 v1, s4, v1
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT:    v_or_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v16i16:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_bfi_b32 v3, 0xffff, v3, v3
+; GFX11-NEXT:    v_bfi_b32 v2, 0xffff, v2, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT:    v_or_b32_e32 v1, s0, v1
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v16i16:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT:    v_bfi_b32 v3, 0xffff, v3, v3
+; GFX12-NEXT:    v_bfi_b32 v2, 0xffff, v2, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT:    v_or_b32_e32 v1, s0, v1
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
+  ret i16 %res
+}
+
+
+; OR-reduction of <2 x i32>: every target folds it to a single v_or_b32.
+define i32 @test_vector_reduce_or_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v2i32:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v2i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v2i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v2i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v2i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v2i32:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v)
+  ret i32 %res
+}
+
+; OR-reduction of <3 x i32>: GFX9 and later fuse the chain into one v_or3_b32;
+; pre-GFX9 uses two v_or_b32.
+define i32 @test_vector_reduce_or_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v3i32:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v3i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v3i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v3i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v3i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v3i32:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> %v)
+  ret i32 %res
+}
+
+; OR-reduction of <4 x i32>: tree reduction; GFX9+ fold the final pair of ORs
+; into a v_or3_b32.
+define i32 @test_vector_reduce_or_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v4i32:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v4i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v4i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX9-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v4i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v4i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v4i32:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v)
+  ret i32 %res
+}
+
+; OR-reduction of <8 x i32>: log2 tree of pairwise ORs; GFX9+ use v_or3_b32
+; for the last two levels.
+define i32 @test_vector_reduce_or_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v8i32:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v8i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v8i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX9-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX9-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX9-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX9-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v8i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX10-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX10-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v8i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX11-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v8i32:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX12-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
+  ret i32 %res
+}
+
+; OR-reduction of <16 x i32>: 16 inputs fill v0-v15 exactly; tree of pairwise
+; ORs, with v_or3_b32 fusion on GFX9 and later.
+define i32 @test_vector_reduce_or_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v16i32:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX7-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX7-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX7-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v16i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX8-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX8-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX8-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX8-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v16i32:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX9-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX9-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX9-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX9-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX9-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX9-NEXT:    v_or3_b32 v0, v0, v8, v4
+; GFX9-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX9-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX9-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v16i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX10-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX10-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX10-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX10-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX10-NEXT:    v_or3_b32 v0, v0, v8, v4
+; GFX10-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX10-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX10-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v16i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX11-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX11-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX11-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX11-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v8, v4
+; GFX11-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX11-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v16i32:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX12-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX12-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX12-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX12-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT:    v_or3_b32 v0, v0, v8, v4
+; GFX12-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX12-NEXT:    v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v)
+  ret i32 %res
+}
+
+; OR-reduction of <2 x i64>: each i64 lives in a 32-bit register pair, so the
+; reduction is two independent 32-bit ORs (low halves, then high halves).
+define i64 @test_vector_reduce_or_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v2i64:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v2i64:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v2i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX9-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v2i64:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v2i64:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v2i64:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v)
+  ret i64 %res
+}
+
+; OR-reduction of <3 x i64>: per-half 32-bit ORs; GFX9+ fuse each half into a
+; single v_or3_b32.
+define i64 @test_vector_reduce_or_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v3i64:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v3i64:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v3i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or3_b32 v0, v0, v2, v4
+; GFX9-NEXT:    v_or3_b32 v1, v1, v3, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v3i64:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or3_b32 v0, v0, v2, v4
+; GFX10-NEXT:    v_or3_b32 v1, v1, v3, v5
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v3i64:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v2, v4
+; GFX11-NEXT:    v_or3_b32 v1, v1, v3, v5
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v3i64:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or3_b32 v0, v0, v2, v4
+; GFX12-NEXT:    v_or3_b32 v1, v1, v3, v5
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i64 @llvm.vector.reduce.or.v3i64(<3 x i64> %v)
+  ret i64 %res
+}
+
+; OR-reduction of <4 x i64>: pairwise 32-bit ORs per half, with v_or3_b32
+; fusion on GFX9 and later.
+define i64 @test_vector_reduce_or_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v4i64:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v4i64:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v4i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX9-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX9-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX9-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v4i64:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX10-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX10-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v4i64:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX11-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v4i64:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX12-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
+  ret i64 %res
+}
+
+; OR-reduction of <8 x i64>: 16 dwords of input (v0-v15), reduced per half
+; with a pairwise OR tree; GFX9+ fold the last levels into v_or3_b32.
+define i64 @test_vector_reduce_or_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v8i64:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX7-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX7-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX7-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v8i64:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX8-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX8-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX8-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX8-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v8i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX9-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX9-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX9-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX9-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX9-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX9-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX9-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX9-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX9-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v8i64:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX10-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX10-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX10-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX10-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX10-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX10-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX10-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v8i64:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX11-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX11-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX11-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX11-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX11-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX11-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v8i64:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX12-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX12-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX12-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX12-NEXT:    v_or3_b32 v2, v2, v10, v6
+; GFX12-NEXT:    v_or3_b32 v3, v3, v11, v7
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_or3_b32 v0, v0, v4, v2
+; GFX12-NEXT:    v_or3_b32 v1, v1, v5, v3
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %v)
+  ret i64 %res
+}
+
+define i64 @test_vector_reduce_or_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_or_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v16
+; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v20
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v22
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX7-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v17
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v21
+; GFX7-NEXT: v_or_b32_e32 v7, v7, v23
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX7-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v2, v15, v16
+; GFX7-NEXT: v_or_b32_e32 v2, v7, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_or_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v16
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v20
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v22
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX8-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX8-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v17
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v21
+; GFX8-NEXT: v_or_b32_e32 v7, v7, v23
+; GFX8-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX8-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX8-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v2, v15, v16
+; GFX8-NEXT: v_or_b32_e32 v2, v7, v2
+; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_or_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX9-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX9-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX9-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX9-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX9-NEXT: v_or3_b32 v6, v6, v22, v14
+; GFX9-NEXT: v_or3_b32 v0, v0, v16, v8
+; GFX9-NEXT: v_or3_b32 v4, v4, v20, v12
+; GFX9-NEXT: v_or3_b32 v2, v2, v10, v6
+; GFX9-NEXT: v_or3_b32 v0, v0, v4, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX9-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX9-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX9-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX9-NEXT: v_or3_b32 v1, v1, v17, v9
+; GFX9-NEXT: v_or3_b32 v5, v5, v21, v13
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v2, v15, v31
+; GFX9-NEXT: v_or3_b32 v2, v7, v23, v2
+; GFX9-NEXT: v_or3_b32 v2, v3, v11, v2
+; GFX9-NEXT: v_or3_b32 v1, v1, v5, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_or_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX10-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX10-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX10-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX10-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX10-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX10-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX10-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX10-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX10-NEXT: v_or3_b32 v6, v6, v22, v14
+; GFX10-NEXT: v_or3_b32 v0, v0, v16, v8
+; GFX10-NEXT: v_or3_b32 v1, v1, v17, v9
+; GFX10-NEXT: v_or3_b32 v4, v4, v20, v12
+; GFX10-NEXT: v_or3_b32 v5, v5, v21, v13
+; GFX10-NEXT: v_or3_b32 v2, v2, v10, v6
+; GFX10-NEXT: v_or3_b32 v0, v0, v4, v2
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_or_b32_e32 v15, v15, v31
+; GFX10-NEXT: v_or3_b32 v7, v7, v23, v15
+; GFX10-NEXT: v_or3_b32 v3, v3, v11, v7
+; GFX10-NEXT: v_or3_b32 v1, v1, v5, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_or_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX11-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX11-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX11-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX11-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX11-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX11-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX11-NEXT: v_or3_b32 v6, v6, v22, v14
+; GFX11-NEXT: v_or3_b32 v0, v0, v16, v8
+; GFX11-NEXT: v_or3_b32 v1, v1, v17, v9
+; GFX11-NEXT: v_or3_b32 v4, v4, v20, v12
+; GFX11-NEXT: v_or3_b32 v5, v5, v21, v13
+; GFX11-NEXT: v_or3_b32 v2, v2, v10, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v4, v2
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_or_b32_e32 v15, v15, v31
+; GFX11-NEXT: v_or3_b32 v7, v7, v23, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v3, v3, v11, v7
+; GFX11-NEXT: v_or3_b32 v1, v1, v5, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_or_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_or_b32_e32 v14, v14, v30
+; GFX12-NEXT: v_or_b32_e32 v2, v2, v18
+; GFX12-NEXT: v_or_b32_e32 v3, v3, v19
+; GFX12-NEXT: v_or_b32_e32 v8, v8, v24
+; GFX12-NEXT: v_or_b32_e32 v9, v9, v25
+; GFX12-NEXT: v_or_b32_e32 v10, v10, v26
+; GFX12-NEXT: v_or_b32_e32 v11, v11, v27
+; GFX12-NEXT: v_or_b32_e32 v12, v12, v28
+; GFX12-NEXT: v_or_b32_e32 v13, v13, v29
+; GFX12-NEXT: v_or3_b32 v6, v6, v22, v14
+; GFX12-NEXT: v_or3_b32 v0, v0, v16, v8
+; GFX12-NEXT: v_or3_b32 v1, v1, v17, v9
+; GFX12-NEXT: v_or3_b32 v4, v4, v20, v12
+; GFX12-NEXT: v_or3_b32 v5, v5, v21, v13
+; GFX12-NEXT: v_or3_b32 v2, v2, v10, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v4, v2
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v15, v15, v31
+; GFX12-NEXT: v_or3_b32 v7, v7, v23, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v3, v3, v11, v7
+; GFX12-NEXT: v_or3_b32 v1, v1, v5, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.or.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.or.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.or.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smax.ll
new file mode 100644
index 0000000000000..93e68c24e1d7e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smax.ll
@@ -0,0 +1,3098 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_smax_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smax_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v2
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v1
+; GFX10-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_i16 v0, v0, v1
+; GFX11-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX11-NEXT: v_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v1
+; GFX12-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX12-NEXT: v_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smax.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smax_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_mov_b32_e32 v5, 0
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v6, 8, v6
+; GFX8-NEXT: v_max_i16_sdwa v2, sext(v2), v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_sdwa v3, sext(v3), v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v4, v5 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_max_i16_sdwa v2, sext(v2), s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_i16_sdwa v3, sext(v3), s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: v_max_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: v_bfe_i32 v6, v5, 24, 8
+; GFX10-NEXT: v_bfe_i32 v5, v5, 16, 8
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_max_i16 v1, v1, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX10-NEXT: v_max_i16 v0, v0, v5
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_max_i16 v1, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_bfe_i32 v5, v5, 8, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v5
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_i16 v1, v1, v5
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_i16 v1, v1, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_max_i16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v8, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v6
+; GFX8-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v8, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_max_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_max_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX10-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX10-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_max_i16 v1, v1, v5
+; GFX10-NEXT: v_max_i16 v2, v2, v6
+; GFX10-NEXT: v_max_i16 v3, v3, v7
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_max_i16 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_max_i16 v1, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX11-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX11-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_i16 v1, v1, v5
+; GFX11-NEXT: v_max_i16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_i16 v3, v3, v7
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_i16 v1, v1, v5
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_i16 v1, v1, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX12-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX12-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_i16 v1, v1, v5
+; GFX12-NEXT: v_max_i16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_i16 v3, v3, v7
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_i16 v1, v1, v5
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v13
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v10, v8, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v10
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v10, v8, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v10
+; GFX7-NEXT: v_bfe_i32 v10, v8, 16, 8
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v8, 24, 8
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v4, v4, v8
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v5, v5, v8
+; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 16, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 24, 8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_max_i32_e32 v7, v7, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 0, 8
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v5
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_max_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_max_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_sdwa v12, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_lshrrev_b32_sdwa v9, v16, v8 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX8-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX8-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v8
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX8-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v10
+; GFX8-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX8-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v11
+; GFX8-NEXT: v_lshrrev_b32_sdwa v13, v16, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX8-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v12
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_max_i16_sdwa v4, sext(v4), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_sdwa v5, sext(v5), sext(v13) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v6, 8, v6
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v14
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX8-NEXT: v_max_i16_sdwa v6, sext(v6), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_lshlrev_b16_e32 v7, 8, v7
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v15
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_max_i16_sdwa v7, sext(v7), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v16, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v6
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_sdwa v2, v2, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_max_i16_sdwa v3, v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v16, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX9-NEXT: v_and_or_b32 v9, v12, v16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX9-NEXT: v_or3_b32 v9, v9, v10, v11
+; GFX9-NEXT: v_max_i16_sdwa v5, sext(v5), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_sdwa v4, sext(v4), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_i16_sdwa v6, sext(v6), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_max_i16_sdwa v7, sext(v7), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_max_i16_sdwa v1, sext(v1), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_sdwa v2, sext(v2), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_max_i16_sdwa v3, sext(v3), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_max_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_sdwa v2, v2, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_max_i16_sdwa v3, v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_max_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_max_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v16, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX10-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX10-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_or3_b32 v11, v12, v13, v14
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_bfe_i32 v9, v11, 8, 8
+; GFX10-NEXT: v_bfe_i32 v10, v11, 16, 8
+; GFX10-NEXT: v_bfe_i32 v12, v11, 24, 8
+; GFX10-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_max_i16 v5, v5, v9
+; GFX10-NEXT: v_max_i16 v6, v6, v10
+; GFX10-NEXT: v_max_i16 v7, v7, v12
+; GFX10-NEXT: v_max_i16 v4, v4, v11
+; GFX10-NEXT: v_bfe_i32 v9, v8, 8, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_bfe_i32 v10, v8, 24, 8
+; GFX10-NEXT: v_max_i16 v1, v1, v9
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_bfe_i32 v7, v8, 16, 8
+; GFX10-NEXT: v_max_i16 v3, v3, v10
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX10-NEXT: v_max_i16 v2, v2, v7
+; GFX10-NEXT: v_bfe_i32 v6, v4, 8, 8
+; GFX10-NEXT: v_bfe_i32 v7, v4, 16, 8
+; GFX10-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v5
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_max_i16 v1, v1, v6
+; GFX10-NEXT: v_max_i16 v2, v2, v7
+; GFX10-NEXT: v_max_i16 v3, v3, v8
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX10-NEXT: v_max_i16 v1, v1, v5
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX10-NEXT: v_max_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_max_i16 v1, v1, s4
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX10-NEXT: v_max_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v6, 0xff, v0, v6
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_or3_b32 v4, v6, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX10-NEXT: v_max_i16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: v_bfe_i32 v13, v12, 8, 8
+; GFX11-NEXT: v_bfe_i32 v11, v12, 16, 8
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_max_i16 v5, v5, v13
+; GFX11-NEXT: v_bfe_i32 v13, v12, 24, 8
+; GFX11-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX11-NEXT: v_max_i16 v6, v6, v11
+; GFX11-NEXT: v_bfe_i32 v9, v8, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_max_i16 v7, v7, v13
+; GFX11-NEXT: v_max_i16 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_max_i16 v2, v2, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_bfe_i32 v7, v8, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v5, v8, 24, 8
+; GFX11-NEXT: v_max_i16 v1, v1, v7
+; GFX11-NEXT: v_bfe_i32 v6, v8, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v7, v4, 8, 8
+; GFX11-NEXT: v_max_i16 v3, v3, v5
+; GFX11-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX11-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX11-NEXT: v_max_i16 v0, v0, v6
+; GFX11-NEXT: v_max_i16 v1, v1, v7
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: v_max_i16 v2, v2, v5
+; GFX11-NEXT: v_max_i16 v3, v3, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_max_i16 v3, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_max_i16 v2, v2, s0
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_i16 v1, v1, v5
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_i16 v1, v1, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_i16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: v_bfe_i32 v13, v12, 8, 8
+; GFX12-NEXT: v_bfe_i32 v11, v12, 16, 8
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_max_i16 v5, v5, v13
+; GFX12-NEXT: v_bfe_i32 v13, v12, 24, 8
+; GFX12-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX12-NEXT: v_max_i16 v6, v6, v11
+; GFX12-NEXT: v_bfe_i32 v9, v8, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_max_i16 v7, v7, v13
+; GFX12-NEXT: v_max_i16 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_max_i16 v2, v2, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_bfe_i32 v7, v8, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v5, v8, 24, 8
+; GFX12-NEXT: v_max_i16 v1, v1, v7
+; GFX12-NEXT: v_bfe_i32 v6, v8, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v7, v4, 8, 8
+; GFX12-NEXT: v_max_i16 v3, v3, v5
+; GFX12-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX12-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX12-NEXT: v_max_i16 v0, v0, v6
+; GFX12-NEXT: v_max_i16 v1, v1, v7
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: v_max_i16 v2, v2, v5
+; GFX12-NEXT: v_max_i16 v3, v3, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_max_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_max_i16 v2, v2, s0
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_i16 v1, v1, v5
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_smax_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_i16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smax_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_max_i16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_max_i16 v0, v0, v2
+; GFX10-NEXT: v_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_i16 v0, v0, v2
+; GFX11-NEXT: v_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_i16 v0, v0, v2
+; GFX12-NEXT: v_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smax.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smax_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i16_e32 v2, v0, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smax_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_bfe_i32 v6, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v4, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v5, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v6
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i16_e32 v4, v0, v2
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v2, v1, v3
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v2, v4, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX12-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smax_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_bfe_i32 v12, v8, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v8, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v8
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v9, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v8
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v9, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v10, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v4, v4, v8
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v10, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v5, v5, v8
+; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v11, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v11, 16, 16
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_max_i32_e32 v7, v7, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_bfe_i32 v6, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v4, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_bfe_i32 v4, v5, 0, 16
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_bfe_i32 v4, v5, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v12
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v6
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_max_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i16_e32 v8, v0, v4
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v4, v1, v5
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v5, v2, v6
+; GFX8-NEXT: v_max_i16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_max_i16_e32 v5, v3, v7
+; GFX8-NEXT: v_max_i16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_max_i16_e32 v5, v8, v2
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v2, v4, v3
+; GFX8-NEXT: v_max_i16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v2, v5, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_i16 v1, v1, v5
+; GFX9-NEXT: v_pk_max_i16 v2, v2, v6
+; GFX9-NEXT: v_pk_max_i16 v3, v3, v7
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v4
+; GFX10-NEXT: v_pk_max_i16 v1, v1, v5
+; GFX10-NEXT: v_pk_max_i16 v2, v2, v6
+; GFX10-NEXT: v_pk_max_i16 v3, v3, v7
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v4
+; GFX11-NEXT: v_pk_max_i16 v1, v1, v5
+; GFX11-NEXT: v_pk_max_i16 v2, v2, v6
+; GFX11-NEXT: v_pk_max_i16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v4
+; GFX12-NEXT: v_pk_max_i16 v1, v1, v5
+; GFX12-NEXT: v_pk_max_i16 v2, v2, v6
+; GFX12-NEXT: v_pk_max_i16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v2
+; GFX12-NEXT: v_pk_max_i16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_smax_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smax_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smax.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smax_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smax_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX8-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX8-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX9-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX10-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX10-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX11-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX11-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX12-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX12-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smax_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX7-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX7-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX7-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX7-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX8-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX8-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX8-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX8-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX8-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX8-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX8-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX9-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX9-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX9-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX9-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX10-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX10-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX10-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX10-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX10-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX10-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX10-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX10-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX11-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX11-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX11-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX11-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX11-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX11-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX11-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v8
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v9
+; GFX12-NEXT: v_max_i32_e32 v2, v2, v10
+; GFX12-NEXT: v_max_i32_e32 v3, v3, v11
+; GFX12-NEXT: v_max_i32_e32 v4, v4, v12
+; GFX12-NEXT: v_max_i32_e32 v5, v5, v13
+; GFX12-NEXT: v_max_i32_e32 v6, v6, v14
+; GFX12-NEXT: v_max_i32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v4
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_i32_e32 v2, v2, v6
+; GFX12-NEXT: v_max_i32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_smax_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smax_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smax.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smax_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smax_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smax_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT: v_cmp_gt_i64_e64 s[4:5], v[10:11], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[18:19]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[4:5], v[20:21]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[22:23]
+; GFX7-NEXT: v_cmp_gt_i64_e64 s[4:5], v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[24:25]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[12:13], v[28:29]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cmp_gt_i64_e64 s[6:7], v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smax_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], v[10:11], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[4:5], v[20:21]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[22:23]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[24:25]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[12:13], v[28:29]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[6:7], v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smax_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], v[4:5], v[20:21]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[6:7], v[8:9], v[24:25]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[10:11], v[12:13], v[28:29]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v20, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v24, v8, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v28, v12, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v21, v5, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v25, v9, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v29, v13, s[10:11]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], v[2:3], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], v[6:7], v[22:23]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[8:9], v[10:11], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v18, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v22, v6, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v19, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v23, v7, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[8:9]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smax_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[18:19]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s5, v[4:5], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v18, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v19, v3, s4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[8:9], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v20, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v21, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v24, v8, s4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s5, v[12:13], v[28:29]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v25, v9, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v26, v10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v27, v11, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v28, v12, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v29, v13, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[14:15], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v30, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v31, v15, s4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smax_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[18:19]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[20:21]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[8:9], v[24:25]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX11-NEXT: v_cmp_gt_i64_e64 s1, v[12:13], v[28:29]
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[14:15], v[30:31]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smax_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[18:19]
+; GFX12-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[20:21]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[8:9], v[24:25]
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX12-NEXT: v_cmp_gt_i64_e64 s1, v[12:13], v[28:29]
+; GFX12-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_i64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[14:15], v[30:31]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smin.ll
new file mode 100644
index 0000000000000..176a718c1a2e7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/smin.ll
@@ -0,0 +1,3098 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_smin_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smin_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v2
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v2) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v1
+; GFX10-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_i16 v0, v0, v1
+; GFX11-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX11-NEXT: v_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v1
+; GFX12-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX12-NEXT: v_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smin.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smin_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_mov_b32_e32 v5, 0
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v6, 8, v6
+; GFX8-NEXT: v_min_i16_sdwa v2, sext(v2), v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_sdwa v3, sext(v3), v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX8-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v4, v5 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_min_i16_sdwa v2, sext(v2), s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_min_i16_sdwa v3, sext(v3), s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: v_min_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v6) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: v_bfe_i32 v6, v5, 24, 8
+; GFX10-NEXT: v_bfe_i32 v5, v5, 16, 8
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_min_i16 v1, v1, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX10-NEXT: v_min_i16 v0, v0, v5
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_min_i16 v1, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_bfe_i32 v5, v5, 8, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v5
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_i16 v1, v1, v5
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_min_i16 v1, v1, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_min_i16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_min_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v8, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v6
+; GFX8-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v8, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_min_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_min_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX10-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX10-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_min_i16 v1, v1, v5
+; GFX10-NEXT: v_min_i16 v2, v2, v6
+; GFX10-NEXT: v_min_i16 v3, v3, v7
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_min_i16 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_min_i16 v1, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX11-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX11-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_i16 v1, v1, v5
+; GFX11-NEXT: v_min_i16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_i16 v3, v3, v7
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_i16 v1, v1, v5
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_min_i16 v1, v1, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX12-NEXT: v_bfe_i32 v6, v4, 16, 8
+; GFX12-NEXT: v_bfe_i32 v7, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_i16 v1, v1, v5
+; GFX12-NEXT: v_min_i16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_i16 v3, v3, v7
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_min_i16 v1, v1, v5
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_min_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v13
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX7-NEXT: v_bfe_i32 v10, v8, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v10
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v10, v8, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v10
+; GFX7-NEXT: v_bfe_i32 v10, v8, 16, 8
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v8, 24, 8
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v5, v5, v8
+; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 16, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX7-NEXT: v_bfe_i32 v8, v9, 24, 8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_min_i32_e32 v7, v7, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 0, 8
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 24, 8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: s_sext_i32_i8 s4, s4
+; GFX7-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX7-NEXT: v_min_i32_e32 v1, s4, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_min_i32_e32 v2, s4, v2
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_min_i32_e32 v3, s4, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_sdwa v12, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_lshrrev_b32_sdwa v9, v16, v8 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX8-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX8-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v8
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX8-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v10
+; GFX8-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX8-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v11
+; GFX8-NEXT: v_lshrrev_b32_sdwa v13, v16, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX8-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v12
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_min_i16_sdwa v4, sext(v4), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_sdwa v5, sext(v5), sext(v13) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v6, 8, v6
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v14
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX8-NEXT: v_min_i16_sdwa v6, sext(v6), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_lshlrev_b16_e32 v7, 8, v7
+; GFX8-NEXT: v_lshlrev_b16_e32 v8, 8, v15
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_min_i16_sdwa v7, sext(v7), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_lshrrev_b32_sdwa v5, v16, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v6
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_sdwa v2, v2, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_min_i16_sdwa v3, v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v5, 8, v5
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v16, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_i16_e32 v1, 0, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX9-NEXT: v_and_or_b32 v9, v12, v16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX9-NEXT: v_or3_b32 v9, v9, v10, v11
+; GFX9-NEXT: v_min_i16_sdwa v5, sext(v5), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_sdwa v4, sext(v4), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_i16_sdwa v6, sext(v6), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_min_i16_sdwa v7, sext(v7), sext(v9) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_min_i16_sdwa v1, sext(v1), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_sdwa v2, sext(v2), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_2
+; GFX9-NEXT: v_min_i16_sdwa v3, sext(v3), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_min_i16_sdwa v0, sext(v0), sext(v8) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_sdwa v2, v2, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_min_i16_sdwa v3, v3, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_min_i16_sdwa v1, v1, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: s_sext_i32_i8 s0, s0
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_min_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX9-NEXT: v_min_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v3, s0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v16, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX10-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX10-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_or3_b32 v11, v12, v13, v14
+; GFX10-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX10-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX10-NEXT: v_bfe_i32 v9, v11, 8, 8
+; GFX10-NEXT: v_bfe_i32 v10, v11, 16, 8
+; GFX10-NEXT: v_bfe_i32 v12, v11, 24, 8
+; GFX10-NEXT: v_bfe_i32 v11, v11, 0, 8
+; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX10-NEXT: v_min_i16 v5, v5, v9
+; GFX10-NEXT: v_min_i16 v6, v6, v10
+; GFX10-NEXT: v_min_i16 v7, v7, v12
+; GFX10-NEXT: v_min_i16 v4, v4, v11
+; GFX10-NEXT: v_bfe_i32 v9, v8, 8, 8
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_bfe_i32 v10, v8, 24, 8
+; GFX10-NEXT: v_min_i16 v1, v1, v9
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_bfe_i32 v7, v8, 16, 8
+; GFX10-NEXT: v_min_i16 v3, v3, v10
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v8, 0, 8
+; GFX10-NEXT: v_min_i16 v2, v2, v7
+; GFX10-NEXT: v_bfe_i32 v6, v4, 8, 8
+; GFX10-NEXT: v_bfe_i32 v7, v4, 16, 8
+; GFX10-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v5
+; GFX10-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX10-NEXT: v_min_i16 v1, v1, v6
+; GFX10-NEXT: v_min_i16 v2, v2, v7
+; GFX10-NEXT: v_min_i16 v3, v3, v8
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: s_sext_i32_i8 s4, s4
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX10-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX10-NEXT: v_min_i16 v1, v1, v5
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX10-NEXT: v_min_i16 v2, v2, s4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_min_i16 v1, v1, s4
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX10-NEXT: v_min_i16 v3, v3, s4
+; GFX10-NEXT: v_and_or_b32 v6, 0xff, v0, v6
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_or3_b32 v4, v6, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX10-NEXT: v_min_i16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX11-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX11-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX11-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX11-NEXT: v_bfe_i32 v13, v12, 8, 8
+; GFX11-NEXT: v_bfe_i32 v11, v12, 16, 8
+; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX11-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_min_i16 v5, v5, v13
+; GFX11-NEXT: v_bfe_i32 v13, v12, 24, 8
+; GFX11-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX11-NEXT: v_min_i16 v6, v6, v11
+; GFX11-NEXT: v_bfe_i32 v9, v8, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_min_i16 v7, v7, v13
+; GFX11-NEXT: v_min_i16 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_min_i16 v2, v2, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_bfe_i32 v7, v8, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v5, v8, 24, 8
+; GFX11-NEXT: v_min_i16 v1, v1, v7
+; GFX11-NEXT: v_bfe_i32 v6, v8, 0, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v7, v4, 8, 8
+; GFX11-NEXT: v_min_i16 v3, v3, v5
+; GFX11-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX11-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX11-NEXT: v_min_i16 v0, v0, v6
+; GFX11-NEXT: v_min_i16 v1, v1, v7
+; GFX11-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX11-NEXT: v_min_i16 v2, v2, v5
+; GFX11-NEXT: v_min_i16 v3, v3, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX11-NEXT: s_sext_i32_i8 s0, s0
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: v_min_i16 v3, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_min_i16 v2, v2, s0
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX11-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_min_i16 v1, v1, v5
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_min_i16 v1, v1, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_i16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX12-NEXT: v_bfe_i32 v6, v6, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX12-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX12-NEXT: v_bfe_i32 v13, v12, 8, 8
+; GFX12-NEXT: v_bfe_i32 v11, v12, 16, 8
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 8
+; GFX12-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_min_i16 v5, v5, v13
+; GFX12-NEXT: v_bfe_i32 v13, v12, 24, 8
+; GFX12-NEXT: v_bfe_i32 v12, v12, 0, 8
+; GFX12-NEXT: v_min_i16 v6, v6, v11
+; GFX12-NEXT: v_bfe_i32 v9, v8, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_min_i16 v7, v7, v13
+; GFX12-NEXT: v_min_i16 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_min_i16 v2, v2, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_bfe_i32 v7, v8, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v5, v8, 24, 8
+; GFX12-NEXT: v_min_i16 v1, v1, v7
+; GFX12-NEXT: v_bfe_i32 v6, v8, 0, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v7, v4, 8, 8
+; GFX12-NEXT: v_min_i16 v3, v3, v5
+; GFX12-NEXT: v_bfe_i32 v5, v4, 16, 8
+; GFX12-NEXT: v_bfe_i32 v8, v4, 24, 8
+; GFX12-NEXT: v_min_i16 v0, v0, v6
+; GFX12-NEXT: v_min_i16 v1, v1, v7
+; GFX12-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX12-NEXT: v_min_i16 v2, v2, v5
+; GFX12-NEXT: v_min_i16 v3, v3, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX12-NEXT: s_sext_i32_i8 s0, s0
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_min_i16 v3, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v5, v4, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_min_i16 v2, v2, s0
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_bfe_i32 v5, v4, 24, 8
+; GFX12-NEXT: v_bfe_i32 v4, v4, 16, 8
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_min_i16 v1, v1, v5
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_min_i16 v1, v1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_bfe_i32 v4, v4, 8, 8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_smin_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_i16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smin_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_min_i16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_min_i16 v0, v0, v2
+; GFX10-NEXT: v_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_i16 v0, v0, v2
+; GFX11-NEXT: v_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_i16 v0, v0, v2
+; GFX12-NEXT: v_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smin.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smin_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i16_e32 v2, v0, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smin_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_bfe_i32 v6, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v4, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v5, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v5, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v6
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i16_e32 v4, v0, v2
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v2, v1, v3
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v2, v4, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX11-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX12-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_smin_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_bfe_i32 v12, v8, 0, 16
+; GFX7-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v8, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v8
+; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v9, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v8
+; GFX7-NEXT: v_bfe_i32 v3, v3, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v9, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v8
+; GFX7-NEXT: v_bfe_i32 v4, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v10, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX7-NEXT: v_bfe_i32 v5, v5, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v10, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v5, v5, v8
+; GFX7-NEXT: v_bfe_i32 v6, v6, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v11, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX7-NEXT: v_bfe_i32 v7, v7, 0, 16
+; GFX7-NEXT: v_bfe_i32 v8, v11, 16, 16
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_min_i32_e32 v7, v7, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_bfe_i32 v6, v4, 0, 16
+; GFX7-NEXT: v_bfe_i32 v4, v4, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX7-NEXT: v_bfe_i32 v4, v5, 0, 16
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX7-NEXT: v_bfe_i32 v4, v5, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v12
+; GFX7-NEXT: v_bfe_i32 v3, v2, 0, 16
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v6
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_bfe_i32 v2, v2, 16, 16
+; GFX7-NEXT: v_min_i32_e32 v1, 0, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i16_e32 v8, v0, v4
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v4, v1, v5
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v5, v2, v6
+; GFX8-NEXT: v_min_i16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_min_i16_e32 v5, v3, v7
+; GFX8-NEXT: v_min_i16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_min_i16_e32 v5, v8, v2
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v2, v4, v3
+; GFX8-NEXT: v_min_i16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v2, v5, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v4
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v5
+; GFX9-NEXT: v_pk_min_i16 v2, v2, v6
+; GFX9-NEXT: v_pk_min_i16 v3, v3, v7
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v4
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v5
+; GFX10-NEXT: v_pk_min_i16 v2, v2, v6
+; GFX10-NEXT: v_pk_min_i16 v3, v3, v7
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v4
+; GFX11-NEXT: v_pk_min_i16 v1, v1, v5
+; GFX11-NEXT: v_pk_min_i16 v2, v2, v6
+; GFX11-NEXT: v_pk_min_i16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX11-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v4
+; GFX12-NEXT: v_pk_min_i16 v1, v1, v5
+; GFX12-NEXT: v_pk_min_i16 v2, v2, v6
+; GFX12-NEXT: v_pk_min_i16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v2
+; GFX12-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_smin_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smin_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smin.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smin_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smin_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX10-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX10-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX11-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX11-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX12-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX12-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_smin_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX7-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX7-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX7-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX7-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX7-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX7-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX8-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX8-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX9-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX9-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX10-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX10-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX10-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX10-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX10-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX10-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX10-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX10-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX11-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX11-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX11-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX11-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX11-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX11-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX11-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v8
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v9
+; GFX12-NEXT: v_min_i32_e32 v2, v2, v10
+; GFX12-NEXT: v_min_i32_e32 v3, v3, v11
+; GFX12-NEXT: v_min_i32_e32 v4, v4, v12
+; GFX12-NEXT: v_min_i32_e32 v5, v5, v13
+; GFX12-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX12-NEXT: v_min_i32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v4
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_i32_e32 v2, v2, v6
+; GFX12-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_i32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_smin_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smin_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.smin.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smin_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; Signed-min reduction of 4 i64 lanes. The checks above show each 64-bit min
+ ; lowered to v_cmp_lt_i64 + a pair of v_cndmask_b32 (two-level pairwise tree).
+ ; NOTE(review): CHECK lines look autogenerated by update_llc_test_checks.py —
+ ; regenerate with the script rather than editing by hand.
+ %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smin_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; Signed-min reduction of 8 i64 lanes: three levels of pairwise
+ ; v_cmp_lt_i64 + v_cndmask_b32 selects (8 -> 4 -> 2 -> 1), as shown above.
+ ; NOTE(review): CHECK lines look autogenerated by update_llc_test_checks.py —
+ ; regenerate with the script rather than editing by hand.
+ %res = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_smin_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT: v_cmp_lt_i64_e64 s[4:5], v[10:11], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[18:19]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[20:21]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[22:23]
+; GFX7-NEXT: v_cmp_lt_i64_e64 s[4:5], v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[24:25]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[12:13], v[28:29]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cmp_lt_i64_e64 s[6:7], v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_smin_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], v[10:11], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[20:21]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[22:23]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[24:25]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[12:13], v[28:29]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[6:7], v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_smin_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[16:17]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[4:5], v[20:21]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[6:7], v[8:9], v[24:25]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[10:11], v[12:13], v[28:29]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v20, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v24, v8, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v28, v12, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v21, v5, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v25, v9, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v29, v13, s[10:11]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], v[6:7], v[22:23]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[8:9], v[10:11], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v18, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v22, v6, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v19, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v23, v7, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[8:9]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[14:15], v[30:31]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_smin_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[18:19]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[4:5], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v18, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v19, v3, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[8:9], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v20, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v21, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v24, v8, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[12:13], v[28:29]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v25, v9, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v26, v10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v27, v11, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v28, v12, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v29, v13, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[14:15], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v30, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v31, v15, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_smin_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[18:19]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[20:21]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[8:9], v[24:25]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[12:13], v[28:29]
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[14:15], v[30:31]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_smin_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[18:19]
+; GFX12-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[20:21]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[8:9], v[24:25]
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX12-NEXT: v_cmp_lt_i64_e64 s1, v[12:13], v[28:29]
+; GFX12-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_i64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[14:15], v[30:31]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; Signed-min reduction of 16 i64 lanes. The 16th element spills past the
+ ; 30 VGPR argument registers, hence the buffer_load/scratch_load of v31 and
+ ; the extra s_waitcnt vmcnt(0) / s_wait_loadcnt before v[30:31] is compared.
+ ; NOTE(review): CHECK lines look autogenerated by update_llc_test_checks.py —
+ ; regenerate with the script rather than editing by hand.
+ %res = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smin.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umax.ll
new file mode 100644
index 0000000000000..63a8227736956
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umax.ll
@@ -0,0 +1,3031 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_umax_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umax_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_max_u16 v0, v0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX10-NEXT: v_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_u16 v0, v0, v1
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX11-NEXT: v_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_u16 v0, v0, v1
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX12-NEXT: v_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umax.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_u32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v6, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX8-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX8-NEXT: v_and_b32_sdwa v4, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_max_u16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
+; GFX8-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX8-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: v_and_b32_sdwa v7, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_max_u16_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v3, v3, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v8, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; GFX9-NEXT: v_or3_b32 v6, v6, v7, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v6
+; GFX9-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX9-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v3
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_mov_b32_e32 v6, 0xff
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v5
+; GFX10-NEXT: v_and_b32_sdwa v5, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_max_u16 v1, v1, v7
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX10-NEXT: v_max_u16 v0, v0, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX10-NEXT: v_max_u16 v1, v1, 0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v5, v5, v6, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v5
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX10-NEXT: v_max_u16 v0, v0, v5
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v3
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: v_max_u16 v1, v1, 0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v3
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: v_max_u16 v1, v1, 0
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v4
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v6
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_bfe_u32 v6, v4, 8, 8
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_u32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX8-NEXT: v_max_u16_e32 v0, v0, v5
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX8-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX9-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX9-NEXT: v_max_u16_e32 v0, v0, v5
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX9-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX9-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v4
+; GFX10-NEXT: v_and_b32_sdwa v7, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_max_u16 v2, v2, v7
+; GFX10-NEXT: v_max_u16 v3, v3, v9
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_max_u16 v1, v1, v6
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_and_or_b32 v7, 0xff, v0, v7
+; GFX10-NEXT: v_or3_b32 v4, v7, v4, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX10-NEXT: v_and_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_max_u16 v1, v1, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_max_u16 v1, v1, 0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v3, v3, v7
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v1, v1, v5
+; GFX11-NEXT: v_max_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v3
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 24, v4
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_or3_b32 v4, v5, v6, v4
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_max_u16 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: v_max_u16 v1, v1, 0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 24, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v3, v3, v7
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v1, v1, v5
+; GFX12-NEXT: v_max_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v3
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 24, v4
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_or3_b32 v4, v5, v6, v4
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_max_u16 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: v_max_u16 v1, v1, 0
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 24, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v8
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v12
+; GFX7-NEXT: v_bfe_u32 v12, v8, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_bfe_u32 v8, v8, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v10
+; GFX7-NEXT: v_max_u32_e32 v4, v4, v8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_bfe_u32 v8, v10, 8, 8
+; GFX7-NEXT: v_max_u32_e32 v5, v5, v8
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX7-NEXT: v_bfe_u32 v8, v10, 16, 8
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v10
+; GFX7-NEXT: v_max_u32_e32 v6, v6, v8
+; GFX7-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_max_u32_e32 v7, v7, v11
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v7
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v4
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v12
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v6
+; GFX7-NEXT: v_bfe_u32 v6, v4, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v9
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_bfe_u32 v4, v4, 8, 8
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v2, 0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_max_u32_e32 v3, 0, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v17, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v9, v8, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_max_u16_sdwa v5, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v8, v10, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_max_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_max_u16_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_e32 v2, v2, v5
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX8-NEXT: v_max_u16_e32 v0, v0, v5
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX8-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX8-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX8-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v9, v8, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_max_u16_sdwa v5, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v8, v10, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_max_u16_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_e32 v2, v2, v5
+; GFX9-NEXT: v_max_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_max_u16_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX9-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX9-NEXT: v_max_u16_e32 v0, v0, v5
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX9-NEXT: v_max_u16_e32 v1, 0, v1
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_max_u16_e32 v2, 0, v2
+; GFX9-NEXT: v_max_u16_e32 v3, 0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v0, v0, v16, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_mov_b32_e32 v13, 0xff
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX10-NEXT: v_and_b32_sdwa v15, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_max_u16 v7, v7, v14
+; GFX10-NEXT: v_max_u16 v6, v6, v15
+; GFX10-NEXT: v_max_u16 v4, v4, v12
+; GFX10-NEXT: v_max_u16 v5, v5, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v8
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX10-NEXT: v_and_b32_sdwa v6, v8, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX10-NEXT: v_max_u16 v3, v3, v9
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX10-NEXT: v_max_u16 v1, v1, v5
+; GFX10-NEXT: v_max_u16 v2, v2, v6
+; GFX10-NEXT: v_and_b32_sdwa v6, v4, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_max_u16 v0, v0, v8
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_max_u16 v2, v2, v6
+; GFX10-NEXT: v_max_u16 v1, v1, v5
+; GFX10-NEXT: v_max_u16 v3, v3, v7
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX10-NEXT: v_max_u16 v3, v3, 0
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX10-NEXT: v_and_b32_sdwa v4, v4, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_max_u16 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v2
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_max_u16 v2, v2, 0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT: v_max_u16 v1, v1, 0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_max_u16 v0, v0, v4
+; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 24, v12
+; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_max_u16 v7, v7, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 24, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v4, v4, v12
+; GFX11-NEXT: v_max_u16 v5, v5, v13
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_or3_b32 v8, v8, v10, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_max_u16 v6, v6, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 24, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_max_u16 v3, v3, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v8
+; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX11-NEXT: v_max_u16 v1, v1, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v2, v2, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 24, v4
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_max_u16 v0, v0, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_max_u16 v3, v3, v8
+; GFX11-NEXT: v_max_u16 v1, v1, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u16 v2, v2, v6
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v3
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 24, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX11-NEXT: v_max_u16 v2, v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or3_b32 v4, v5, v6, v4
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX11-NEXT: v_max_u16 v3, v3, 0
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_max_u16 v1, v1, v5
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: v_max_u16 v1, v1, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 24, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_max_u16 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX11-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 24, v12
+; GFX12-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_max_u16 v7, v7, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 24, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v4, v4, v12
+; GFX12-NEXT: v_max_u16 v5, v5, v13
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_or3_b32 v8, v8, v10, v9
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_max_u16 v6, v6, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 24, v8
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_max_u16 v3, v3, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v8
+; GFX12-NEXT: v_and_b32_e32 v8, 0xff, v9
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX12-NEXT: v_max_u16 v1, v1, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v2, v2, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 24, v4
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_max_u16 v0, v0, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_max_u16 v3, v3, v8
+; GFX12-NEXT: v_max_u16 v1, v1, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u16 v2, v2, v6
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v3
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v2
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 24, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v2
+; GFX12-NEXT: v_max_u16 v2, v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_or3_b32 v4, v5, v6, v4
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v3
+; GFX12-NEXT: v_max_u16 v3, v3, 0
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_max_u16 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: v_max_u16 v1, v1, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 24, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_max_u16 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v0, 0xff, v0, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 24, v3
+; GFX12-NEXT: v_or3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_umax_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_u16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umax_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_max_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_max_u16 v0, v0, v2
+; GFX10-NEXT: v_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_u16 v0, v0, v2
+; GFX11-NEXT: v_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_max_u16 v0, v0, v2
+; GFX12-NEXT: v_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umax.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umax_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umax_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v5
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_e32 v4, v0, v2
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v2, v1, v3
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v2, v4, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umax_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v9
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v10
+; GFX7-NEXT: v_max_u32_e32 v5, v5, v14
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-NEXT: v_max_u32_e32 v4, v4, v8
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v11
+; GFX7-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_max_u32_e32 v6, v6, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_or_b32_e32 v5, v6, v5
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v5
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v12
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_max_u32_e32 v1, 0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u16_e32 v8, v0, v4
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v4, v1, v5
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v5, v2, v6
+; GFX8-NEXT: v_max_u16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_max_u16_e32 v5, v3, v7
+; GFX8-NEXT: v_max_u16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_max_u16_e32 v5, v8, v2
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v2, v4, v3
+; GFX8-NEXT: v_max_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_u16_e32 v2, v5, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_max_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_max_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_u16 v1, v1, v5
+; GFX9-NEXT: v_pk_max_u16 v2, v2, v6
+; GFX9-NEXT: v_pk_max_u16 v3, v3, v7
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v4
+; GFX10-NEXT: v_pk_max_u16 v1, v1, v5
+; GFX10-NEXT: v_pk_max_u16 v2, v2, v6
+; GFX10-NEXT: v_pk_max_u16 v3, v3, v7
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v4
+; GFX11-NEXT: v_pk_max_u16 v1, v1, v5
+; GFX11-NEXT: v_pk_max_u16 v2, v2, v6
+; GFX11-NEXT: v_pk_max_u16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v4
+; GFX12-NEXT: v_pk_max_u16 v1, v1, v5
+; GFX12-NEXT: v_pk_max_u16 v2, v2, v6
+; GFX12-NEXT: v_pk_max_u16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_max_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_umax_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umax_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umax.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umax_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umax_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX8-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX8-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX9-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX10-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX10-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX11-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX11-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX12-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX12-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umax_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX7-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX7-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX7-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX7-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX7-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX8-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX8-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX8-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX8-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX8-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX8-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX8-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX8-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX9-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX9-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX9-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX9-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX9-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX9-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX9-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX10-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX10-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX10-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX10-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX10-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX10-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX10-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX10-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX11-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX11-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX11-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX11-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX11-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX11-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX11-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v8
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v9
+; GFX12-NEXT: v_max_u32_e32 v2, v2, v10
+; GFX12-NEXT: v_max_u32_e32 v3, v3, v11
+; GFX12-NEXT: v_max_u32_e32 v4, v4, v12
+; GFX12-NEXT: v_max_u32_e32 v5, v5, v13
+; GFX12-NEXT: v_max_u32_e32 v6, v6, v14
+; GFX12-NEXT: v_max_u32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v4
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_max_u32_e32 v2, v2, v6
+; GFX12-NEXT: v_max_u32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_max_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_max_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_umax_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umax_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umax.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umax_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umax_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umax_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT: v_cmp_gt_u64_e64 s[4:5], v[10:11], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[18:19]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[20:21]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[22:23]
+; GFX7-NEXT: v_cmp_gt_u64_e64 s[4:5], v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[24:25]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[12:13], v[28:29]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cmp_gt_u64_e64 s[6:7], v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umax_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[4:5], v[10:11], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[20:21]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[22:23]
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[4:5], v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[24:25]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[12:13], v[28:29]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[6:7], v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umax_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[2:3], v[4:5], v[20:21]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[6:7], v[8:9], v[24:25]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[28:29]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v20, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v24, v8, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v28, v12, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v21, v5, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v25, v9, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v29, v13, s[10:11]
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[0:1], v[2:3], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[2:3]
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[22:23]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[8:9], v[10:11], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v18, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v22, v6, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v19, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v23, v7, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[8:9]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umax_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[18:19]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s5, v[4:5], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v18, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v19, v3, s4
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[8:9], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v20, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v21, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v24, v8, s4
+; GFX10-NEXT: v_cmp_gt_u64_e64 s5, v[12:13], v[28:29]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v25, v9, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v26, v10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v27, v11, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v28, v12, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v29, v13, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[14:15], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v30, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v31, v15, s4
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umax_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[18:19]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[20:21]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[8:9], v[24:25]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX11-NEXT: v_cmp_gt_u64_e64 s1, v[12:13], v[28:29]
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[14:15], v[30:31]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umax_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[18:19]
+; GFX12-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[20:21]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[8:9], v[24:25]
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX12-NEXT: v_cmp_gt_u64_e64 s1, v[12:13], v[28:29]
+; GFX12-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_u64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[14:15], v[30:31]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umin.ll
new file mode 100644
index 0000000000000..8369f8458c495
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/umin.ll
@@ -0,0 +1,2678 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_umin_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umin_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v2
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umin.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umin_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v6, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v6, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v3, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v6, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v3, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX10-NEXT: v_mov_b32_e32 v3, 0xff
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v2
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_u16 v1, v1, v5
+; GFX10-NEXT: v_min_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_u16 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_min_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_u16 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_min_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v4
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v6
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_bfe_u32 v6, v4, 8, 8
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_min_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v3, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v3, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v5, 0xff
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v4
+; GFX10-NEXT: v_and_b32_sdwa v7, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_min_u16 v2, v2, v7
+; GFX10-NEXT: v_min_u16 v3, v3, v9
+; GFX10-NEXT: v_min_u16 v0, v0, v4
+; GFX10-NEXT: v_min_u16 v1, v1, v6
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_u16 v1, v1, v3
+; GFX10-NEXT: v_min_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_u16 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_min_u16 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_min_u16 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_u16 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_min_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_u16 v0, v0, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_u16 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_min_u16 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_min_u16 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_u16 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_min_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_u16 v0, v0, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v10, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v8
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v12
+; GFX7-NEXT: v_bfe_u32 v12, v8, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_bfe_u32 v8, v8, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v10
+; GFX7-NEXT: v_min_u32_e32 v4, v4, v8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_bfe_u32 v8, v10, 8, 8
+; GFX7-NEXT: v_min_u32_e32 v5, v5, v8
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX7-NEXT: v_bfe_u32 v8, v10, 16, 8
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v10
+; GFX7-NEXT: v_min_u32_e32 v6, v6, v8
+; GFX7-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_min_u32_e32 v7, v7, v11
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v7
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v4
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v12
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v6
+; GFX7-NEXT: v_bfe_u32 v6, v4, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v9
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v1
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v17, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v9, v8, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_min_u16_sdwa v5, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v8, v10, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_min_u16_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_min_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_min_u16_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v2, v2, v5
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_min_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_and_b32_sdwa v3, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v9, v8, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_sdwa v5, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v8, v10, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT: v_min_u16_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_min_u16_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_sdwa v5, v4, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_e32 v2, v2, v5
+; GFX9-NEXT: v_min_u16_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_and_b32_sdwa v3, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_mov_b32_e32 v13, 0xff
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v12
+; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX10-NEXT: v_and_b32_sdwa v15, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX10-NEXT: v_min_u16 v7, v7, v14
+; GFX10-NEXT: v_min_u16 v6, v6, v15
+; GFX10-NEXT: v_min_u16 v4, v4, v12
+; GFX10-NEXT: v_min_u16 v5, v5, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v11
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v8
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v8
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX10-NEXT: v_and_b32_sdwa v6, v8, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX10-NEXT: v_min_u16 v3, v3, v9
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX10-NEXT: v_min_u16 v1, v1, v5
+; GFX10-NEXT: v_min_u16 v2, v2, v6
+; GFX10-NEXT: v_and_b32_sdwa v6, v4, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_u16 v0, v0, v8
+; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-NEXT: v_min_u16 v2, v2, v6
+; GFX10-NEXT: v_min_u16 v1, v1, v5
+; GFX10-NEXT: v_min_u16 v3, v3, v7
+; GFX10-NEXT: v_min_u16 v0, v0, v4
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_u16 v1, v1, v3
+; GFX10-NEXT: v_min_u16 v0, v0, v2
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX11-NEXT: v_or3_b32 v8, v8, v10, v9
+; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX11-NEXT: v_min_u16 v7, v7, v14
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX11-NEXT: v_min_u16 v5, v5, v11
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX11-NEXT: v_min_u16 v4, v4, v12
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_min_u16 v6, v6, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX11-NEXT: v_min_u16 v1, v1, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 24, v8
+; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 16, v4
+; GFX11-NEXT: v_min_u16 v3, v3, v6
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_min_u16 v2, v2, v5
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v9
+; GFX11-NEXT: v_min_u16 v0, v0, v8
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-NEXT: v_min_u16 v1, v1, v7
+; GFX11-NEXT: v_min_u16 v3, v3, v6
+; GFX11-NEXT: v_min_u16 v2, v2, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_u16 v0, v0, v4
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_u16 v1, v1, v3
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_min_u16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 24, v12
+; GFX12-NEXT: v_or3_b32 v8, v8, v10, v9
+; GFX12-NEXT: v_and_b32_e32 v0, 0xff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v12
+; GFX12-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX12-NEXT: v_min_u16 v7, v7, v14
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX12-NEXT: v_min_u16 v5, v5, v11
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v13
+; GFX12-NEXT: v_min_u16 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_min_u16 v6, v6, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v9
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX12-NEXT: v_min_u16 v1, v1, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 24, v8
+; GFX12-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v4
+; GFX12-NEXT: v_min_u16 v3, v3, v6
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_min_u16 v2, v2, v5
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v9
+; GFX12-NEXT: v_min_u16 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX12-NEXT: v_min_u16 v1, v1, v7
+; GFX12-NEXT: v_min_u16 v3, v3, v6
+; GFX12-NEXT: v_min_u16 v2, v2, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_u16 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_u16 v1, v1, v3
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_min_u16 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_umin_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_u16_sdwa v1, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umin_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_min_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_min_u16 v0, v0, v2
+; GFX10-NEXT: v_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_u16 v0, v0, v2
+; GFX11-NEXT: v_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_min_u16 v0, v0, v2
+; GFX12-NEXT: v_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umin.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umin_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umin_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v5
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_e32 v4, v0, v2
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v2, v1, v3
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v2, v4, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_umin_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v9
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v10
+; GFX7-NEXT: v_min_u32_e32 v5, v5, v14
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX7-NEXT: v_min_u32_e32 v4, v4, v8
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v11
+; GFX7-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_min_u32_e32 v6, v6, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_or_b32_e32 v5, v6, v5
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v5
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v12
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_e32 v8, v0, v4
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v4, v1, v5
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v5, v2, v6
+; GFX8-NEXT: v_min_u16_sdwa v2, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_min_u16_e32 v5, v3, v7
+; GFX8-NEXT: v_min_u16_sdwa v3, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: v_min_u16_e32 v5, v8, v2
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v2, v4, v3
+; GFX8-NEXT: v_min_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_u16_e32 v2, v5, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_min_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v4
+; GFX9-NEXT: v_pk_min_u16 v1, v1, v5
+; GFX9-NEXT: v_pk_min_u16 v2, v2, v6
+; GFX9-NEXT: v_pk_min_u16 v3, v3, v7
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v4
+; GFX10-NEXT: v_pk_min_u16 v1, v1, v5
+; GFX10-NEXT: v_pk_min_u16 v2, v2, v6
+; GFX10-NEXT: v_pk_min_u16 v3, v3, v7
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v4
+; GFX11-NEXT: v_pk_min_u16 v1, v1, v5
+; GFX11-NEXT: v_pk_min_u16 v2, v2, v6
+; GFX11-NEXT: v_pk_min_u16 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX11-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v4
+; GFX12-NEXT: v_pk_min_u16 v1, v1, v5
+; GFX12-NEXT: v_pk_min_u16 v2, v2, v6
+; GFX12-NEXT: v_pk_min_u16 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v2
+; GFX12-NEXT: v_pk_min_u16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_min_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_umin_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umin_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umin.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umin_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umin_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX8-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX8-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX9-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX10-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX10-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX11-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX11-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX12-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX12-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_umin_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX7-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX7-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX7-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX7-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX7-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX7-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX7-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX7-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX8-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX8-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX8-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX8-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX8-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX8-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX8-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX8-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX8-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX9-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX9-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX9-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX9-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX9-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX9-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX10-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX10-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX10-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX10-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX10-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX10-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX10-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX10-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX10-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX11-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX11-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX11-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX11-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX11-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX11-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX11-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX11-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v8
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v9
+; GFX12-NEXT: v_min_u32_e32 v2, v2, v10
+; GFX12-NEXT: v_min_u32_e32 v3, v3, v11
+; GFX12-NEXT: v_min_u32_e32 v4, v4, v12
+; GFX12-NEXT: v_min_u32_e32 v5, v5, v13
+; GFX12-NEXT: v_min_u32_e32 v6, v6, v14
+; GFX12-NEXT: v_min_u32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v4
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_min_u32_e32 v2, v2, v6
+; GFX12-NEXT: v_min_u32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v2
+; GFX12-NEXT: v_min_u32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_umin_v2i64(<2 x i64> %v) {
+; NOTE(review): the CHECK lines below look autogenerated
+; (update_llc_test_checks.py style) -- regenerate with the script rather
+; than hand-editing them.
+; Single reduction step: one 64-bit unsigned compare (v_cmp_lt_u64) whose
+; mask feeds a select of each 32-bit half; GFX11+ fuse the two selects into
+; one v_dual_cndmask_b32. GFX9's s_nop 1 presumably pads the VALU->VCC read
+; hazard -- confirm against the GFX9 hazard rules.
+; GFX7-LABEL: test_vector_reduce_umin_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; umin reduction of <2 x i64> to a scalar i64.
+ %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umin_v3i64(<3 x i64> %v) {
+; NOTE(review): autogenerated-looking CHECK lines (update_llc_test_checks.py
+; style) -- regenerate rather than hand-edit.
+; Odd element count forces a sequential reduction: min(elt0, elt1) first,
+; then min of that result with elt2 -- two compare+select steps on every
+; target.
+; GFX7-LABEL: test_vector_reduce_umin_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; umin reduction of <3 x i64> to a scalar i64.
+ %res = call i64 @llvm.vector.reduce.umin.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umin_v4i64(<4 x i64> %v) {
+; NOTE(review): autogenerated-looking CHECK lines (update_llc_test_checks.py
+; style) -- regenerate rather than hand-edit.
+; Pairwise tree reduction: two independent umins (elt0 vs elt2, elt1 vs
+; elt3) followed by a final combine. GFX10+ keep the second compare result
+; in an SGPR pair (s4 / s0) so both compares can be issued before the
+; selects.
+; GFX7-LABEL: test_vector_reduce_umin_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; umin reduction of <4 x i64> to a scalar i64.
+ %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umin_v8i64(<8 x i64> %v) {
+; NOTE(review): autogenerated-looking CHECK lines (update_llc_test_checks.py
+; style) -- regenerate rather than hand-edit.
+; Three-level tree reduction (8 -> 4 -> 2 -> 1): four pairwise u64
+; compare+select steps, then two, then a final combine -- seven
+; compare+select pairs total. GFX10+ spread the condition masks across
+; vcc_lo and SGPRs (s4/s5, s0/s1) to overlap independent compares.
+; GFX7-LABEL: test_vector_reduce_umin_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ ; umin reduction of <8 x i64> to a scalar i64.
+ %res = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_umin_v16i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[4:5], v[10:11], v[26:27]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[18:19]
+; GFX7-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[20:21]
+; GFX7-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[22:23]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[4:5], v[2:3], v[10:11]
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[24:25]
+; GFX7-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[12:13], v[28:29]
+; GFX7-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[6:7], v[4:5], v[12:13]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_umin_v16i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[4:5], v[10:11], v[26:27]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[20:21]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[22:23]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[4:5], v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[24:25]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[12:13], v[28:29]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[6:7], v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[6:7]
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_umin_v16i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: scratch_load_dword v31, off, s32
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[16:17]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[2:3], v[4:5], v[20:21]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[6:7], v[8:9], v[24:25]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[10:11], v[12:13], v[28:29]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v20, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v24, v8, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v28, v12, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v21, v5, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v25, v9, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v29, v13, s[10:11]
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[0:1], v[2:3], v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v13, v5, s[2:3]
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[4:5], v[6:7], v[22:23]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[8:9], v[10:11], v[26:27]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v18, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v22, v6, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v26, v10, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v19, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v23, v7, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v27, v11, s[8:9]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[14:15], v[30:31]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v30, v14, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v31, v15, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v10, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v11, v3, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: s_nop 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_umin_v16i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[18:19]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, v[4:5], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v18, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v19, v3, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[8:9], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v20, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v21, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v24, v8, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, v[12:13], v[28:29]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v25, v9, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v26, v10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v27, v11, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v28, v12, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v29, v13, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, v[4:5], v[12:13]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v12, v4, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s5
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[14:15], v[30:31]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v30, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v31, v15, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[10:11]
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_umin_v16i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[18:19]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[20:21]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[8:9], v[24:25]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX11-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX11-NEXT: v_cmp_lt_u64_e64 s1, v[12:13], v[28:29]
+; GFX11-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[12:13]
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[14:15], v[30:31]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[10:11]
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX11-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX11-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_umin_v16i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[16:17]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[18:19]
+; GFX12-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[20:21]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v16, v0 :: v_dual_cndmask_b32 v1, v17, v1
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[22:23]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v18, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v19, v3, s0
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[8:9], v[24:25]
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v20, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v21, v5, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v22, v6 :: v_dual_cndmask_b32 v7, v23, v7
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[10:11], v[26:27]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v8, v24, v8, s0
+; GFX12-NEXT: v_cmp_lt_u64_e64 s1, v[12:13], v[28:29]
+; GFX12-NEXT: v_cndmask_b32_e64 v9, v25, v9, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v10, v26, v10 :: v_dual_cndmask_b32 v11, v27, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v12, v28, v12, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v13, v29, v13, s1
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v8, v0 :: v_dual_cndmask_b32 v1, v9, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_u64_e64 s1, v[4:5], v[12:13]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v4, v12, v4, s1
+; GFX12-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[14:15], v[30:31]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v14, v30, v14, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v15, v31, v15, s0
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[10:11]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[14:15]
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v10, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v11, v3, s0
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v6, v14, v6 :: v_dual_cndmask_b32 v7, v15, v7
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v4, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
+; GFX12-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %v)
+ ret i64 %res
+}
+
+declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umin.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/xor.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/xor.ll
new file mode 100644
index 0000000000000..2929b4bea77a2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm-intrinsics/reduce/xor.ll
@@ -0,0 +1,2276 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
+
+define i8 @test_vector_reduce_xor_v2i8(<2 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v2i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v2i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v2i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v2i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v2i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v2i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_xor_v3i8(<3 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v3i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v3i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v3i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v3i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v3i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v3i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_xor_v4i8(<4 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v4i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v4i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v4, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v4i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v6, v0, v4, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v6, v2, v3
+; GFX9-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v4, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v4i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v4, 8
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v5, 0xff, v0, v5
+; GFX10-NEXT: v_or3_b32 v2, v5, v2, v3
+; GFX10-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v4i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v4i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v4, 0xff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_xor_v8i8(<8 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v8i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v8i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v8i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v9, 8
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v8, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v8, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v8i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v8, 8
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX10-NEXT: v_xor_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_xor_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v8i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX11-NEXT: v_xor_b32_e32 v3, v3, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v8i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v5, 24, v4
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v1
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v)
+ ret i8 %res
+}
+
+define i8 @test_vector_reduce_xor_v16i8(<16 x i8> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v16i8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v14
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13
+; GFX7-NEXT: v_or_b32_e32 v12, v12, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX7-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX7-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX7-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX7-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX7-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v8
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v8
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v1
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v16i8:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v16, 8
+; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_and_b32_e32 v9, 0xff, v11
+; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX8-NEXT: v_or_b32_sdwa v10, v12, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_and_b32_e32 v11, 0xff, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX8-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX8-NEXT: v_xor_b32_e32 v5, v5, v11
+; GFX8-NEXT: v_xor_b32_e32 v4, v4, v10
+; GFX8-NEXT: v_xor_b32_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX8-NEXT: v_xor_b32_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_and_b32_e32 v5, 0xff, v7
+; GFX8-NEXT: v_or_b32_e32 v8, v8, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_xor_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_xor_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v16i8:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v17, 8
+; GFX9-NEXT: v_mov_b32_e32 v16, 0xff
+; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v17, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v8, v8, v16, v9
+; GFX9-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX9-NEXT: v_or3_b32 v8, v8, v9, v10
+; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v10, v12, v16, v10
+; GFX9-NEXT: v_and_b32_e32 v11, 0xff, v14
+; GFX9-NEXT: v_and_b32_e32 v12, 0xff, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12
+; GFX9-NEXT: v_or3_b32 v10, v10, v11, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v10
+; GFX9-NEXT: v_xor_b32_e32 v5, v5, v11
+; GFX9-NEXT: v_xor_b32_e32 v4, v4, v10
+; GFX9-NEXT: v_xor_b32_sdwa v6, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_sdwa v7, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v4, v4, v16, v5
+; GFX9-NEXT: v_and_b32_e32 v5, 0xff, v6
+; GFX9-NEXT: v_and_b32_e32 v6, 0xff, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v8
+; GFX9-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_xor_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX9-NEXT: v_and_or_b32 v4, v0, v16, v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX9-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX9-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_and_or_b32 v1, v0, v16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v16i8:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v16, 8
+; GFX10-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX10-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v16, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX10-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX10-NEXT: v_and_b32_e32 v9, 0xff, v10
+; GFX10-NEXT: v_and_b32_e32 v10, 0xff, v11
+; GFX10-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10
+; GFX10-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX10-NEXT: v_xor_b32_sdwa v6, v6, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_xor_b32_sdwa v7, v7, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX10-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v16, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7
+; GFX10-NEXT: v_or3_b32 v7, v8, v9, v10
+; GFX10-NEXT: v_or3_b32 v4, v4, v5, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v7
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v4
+; GFX10-NEXT: v_xor3_b32 v0, v0, v7, v4
+; GFX10-NEXT: v_xor3_b32 v1, v1, v5, v9
+; GFX10-NEXT: v_xor3_b32 v2, v2, v6, v10
+; GFX10-NEXT: v_xor3_b32 v3, v3, v8, v11
+; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX10-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX10-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX10-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX10-NEXT: v_xor_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v16i8:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX11-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX11-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX11-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX11-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX11-NEXT: v_or3_b32 v5, v8, v10, v11
+; GFX11-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX11-NEXT: v_xor3_b32 v0, v0, v5, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_xor3_b32 v1, v1, v6, v7
+; GFX11-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX11-NEXT: v_xor3_b32 v2, v2, v8, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v1
+; GFX11-NEXT: v_xor3_b32 v3, v3, v9, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v16i8:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX12-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX12-NEXT: v_and_b32_e32 v15, 0xff, v15
+; GFX12-NEXT: v_and_b32_e32 v9, 0xff, v9
+; GFX12-NEXT: v_and_b32_e32 v10, 0xff, v10
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 8, v13
+; GFX12-NEXT: v_and_b32_e32 v11, 0xff, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v12, 0xff, v12, v13
+; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX12-NEXT: v_lshlrev_b32_e32 v14, 24, v15
+; GFX12-NEXT: v_lshlrev_b32_e32 v11, 24, v11
+; GFX12-NEXT: v_and_or_b32 v8, 0xff, v8, v9
+; GFX12-NEXT: v_or3_b32 v12, v12, v13, v14
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v13, 8, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v12
+; GFX12-NEXT: v_lshrrev_b32_e32 v15, 24, v12
+; GFX12-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX12-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX12-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX12-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v4, v5
+; GFX12-NEXT: v_or3_b32 v5, v8, v10, v11
+; GFX12-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 8, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v9, 24, v5
+; GFX12-NEXT: v_lshrrev_b32_e32 v7, 8, v4
+; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v4
+; GFX12-NEXT: v_xor3_b32 v0, v0, v5, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_xor3_b32 v1, v1, v6, v7
+; GFX12-NEXT: v_lshrrev_b32_e32 v6, 24, v4
+; GFX12-NEXT: v_xor3_b32 v2, v2, v8, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v7, 0xff, v1
+; GFX12-NEXT: v_xor3_b32 v3, v3, v9, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xff, v2
+; GFX12-NEXT: v_lshlrev_b32_e32 v4, 8, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_b32_e32 v3, 0xff, v3
+; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_and_or_b32 v4, 0xff, v0, v4
+; GFX12-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v4, v2, v3
+; GFX12-NEXT: v_lshrrev_b32_e32 v3, 24, v2
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v1, 0xff, v1
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_or_b32 v1, 0xff, v0, v1
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: v_bfe_u32 v0, v0, 0, 8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %v)
+ ret i8 %res
+}
+
+define i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v2i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v2i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v2i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v2i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_xor_v3i16(<3 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v3i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v3i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v3i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v3i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX10-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v3i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v3i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.xor.v3i16(<3 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_xor_v4i16(<4 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v4i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v4i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v4i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_xor_v8i16(<8 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v8i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v5
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v8i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v8i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_mov_b32 s0, 0xffff
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v8i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT: v_xor_b32_e32 v1, s4, v1
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v8i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v8i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %v)
+ ret i16 %res
+}
+
+define i16 @test_vector_reduce_xor_v16i16(<16 x i16> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v16i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; GFX7-NEXT: v_or_b32_e32 v8, v9, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_or_b32_e32 v9, v9, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v13
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v12
+; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_or_b32_e32 v10, v10, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v15
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v14
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v4
+; GFX7-NEXT: v_or_b32_e32 v11, v11, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v8
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v9
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v10
+; GFX7-NEXT: v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v9
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v13
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v11
+; GFX7-NEXT: v_or_b32_e32 v5, v5, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v10
+; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v14
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v11
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX7-NEXT: v_or_b32_e32 v7, v7, v8
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX7-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v16i16:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v6
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v7
+; GFX8-NEXT: v_or_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX8-NEXT: v_or_b32_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX8-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v16i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v5, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v1, v3, v7
+; GFX9-NEXT: s_mov_b32 s0, 0xffff
+; GFX9-NEXT: v_xor_b32_e32 v4, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v0, v2, v6
+; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v1
+; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v0
+; GFX9-NEXT: v_xor_b32_e32 v1, v5, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v4, v0
+; GFX9-NEXT: v_bfi_b32 v2, s0, v1, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v16i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX10-NEXT: v_xor_b32_e32 v1, s4, v1
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_xor_b32_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v16i16:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX11-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v16i16:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_bfi_b32 v3, 0xffff, v3, v3
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v2, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_bfi_b32 v2, 0xffff, v1, v1
+; GFX12-NEXT: v_xor_b32_e32 v1, s0, v1
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v)
+ ret i16 %res
+}
+
+
+define i32 @test_vector_reduce_xor_v2i32(<2 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v2i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v2i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v2i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v2i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v2i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_xor_v3i32(<3 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v3i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v3i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v3i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v3i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v3i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v3i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor3_b32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.xor.v3i32(<3 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_xor_v4i32(<4 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v4i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v4i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v4i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v4i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v4i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_xor_v8i32(<8 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v8i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v8i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v8i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v8i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX10-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX10-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v8i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX11-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v8i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX12-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v)
+ ret i32 %res
+}
+
+define i32 @test_vector_reduce_xor_v16i32(<16 x i32> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v16i32:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v16i32:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v16i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX9-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX9-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX9-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v16i32:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX10-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX10-NEXT: v_xor3_b32 v0, v0, v8, v4
+; GFX10-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX10-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX10-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v16i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX11-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX11-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v8, v4
+; GFX11-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX11-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v16i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_xor3_b32 v0, v0, v8, v4
+; GFX12-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX12-NEXT: v_xor3_b32 v0, v0, v2, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v)
+ ret i32 %res
+}
+
+define i64 @test_vector_reduce_xor_v2i64(<2 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v2i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v2i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v2i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v2i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v2i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v2i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_xor_v3i64(<3 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v3i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v3i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v3i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v3i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor3_b32 v0, v0, v2, v4
+; GFX10-NEXT: v_xor3_b32 v1, v1, v3, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v3i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v2, v4
+; GFX11-NEXT: v_xor3_b32 v1, v1, v3, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v3i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor3_b32 v0, v0, v2, v4
+; GFX12-NEXT: v_xor3_b32 v1, v1, v3, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.xor.v3i64(<3 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_xor_v4i64(<4 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v4i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v4i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v4i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v4i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX10-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX10-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX10-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v4i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX11-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX11-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v4i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX12-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
+ ret i64 %res
+}
+
+define i64 @test_vector_reduce_xor_v8i64(<8 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v8i64:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX7-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX7-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX7-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX7-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX7-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v8i64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX8-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX8-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX8-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX8-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX8-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v8i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX9-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX9-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX9-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX9-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX9-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v8i64:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX10-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX10-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX10-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX10-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX10-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX10-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX10-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX10-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX10-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v8i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX11-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX11-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX11-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX11-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX11-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX11-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX11-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX11-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v8i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_xor3_b32 v2, v2, v10, v6
+; GFX12-NEXT: v_xor3_b32 v3, v3, v11, v7
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_xor3_b32 v0, v0, v4, v2
+; GFX12-NEXT: v_xor3_b32 v1, v1, v5, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %res = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> %v)
+ ret i64 %res
+}
+
+; NOTE(review): The GFX* lines below are FileCheck directives (appear autogenerated,
+; update_llc_test_checks.py-style) pinning per-subtarget codegen for a <16 x i64>
+; XOR reduction; do not hand-edit them — regenerate instead.
+define i64 @test_vector_reduce_xor_v16i64(<16 x i64> %v) {
+; GFX7-LABEL: test_vector_reduce_xor_v16i64:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX7-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX7-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX7-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX7-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX7-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX7-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX7-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX7-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX7-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GFX7-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GFX7-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GFX7-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX7-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX7-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GFX7-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX7-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX7-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX7-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX7-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX7-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX7-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX7-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GFX7-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GFX7-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GFX7-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GFX7-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_xor_b32_e32 v2, v15, v16
+; GFX7-NEXT:    v_xor_b32_e32 v2, v7, v2
+; GFX7-NEXT:    v_xor_b32_e32 v2, v3, v2
+; GFX7-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: test_vector_reduce_xor_v16i64:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX8-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX8-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX8-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX8-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX8-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX8-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX8-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GFX8-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GFX8-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GFX8-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX8-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX8-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX8-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX8-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX8-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX8-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GFX8-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_xor_b32_e32 v2, v15, v16
+; GFX8-NEXT:    v_xor_b32_e32 v2, v7, v2
+; GFX8-NEXT:    v_xor_b32_e32 v2, v3, v2
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: test_vector_reduce_xor_v16i64:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    scratch_load_dword v31, off, s32
+; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX9-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX9-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX9-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX9-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX9-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX9-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX9-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GFX9-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GFX9-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GFX9-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX9-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX9-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX9-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX9-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX9-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX9-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX9-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GFX9-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GFX9-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_xor_b32_e32 v2, v15, v31
+; GFX9-NEXT:    v_xor_b32_e32 v2, v7, v2
+; GFX9-NEXT:    v_xor_b32_e32 v2, v3, v2
+; GFX9-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: test_vector_reduce_xor_v16i64:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX10-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX10-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX10-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX10-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX10-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX10-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX10-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX10-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX10-NEXT:    v_xor3_b32 v6, v6, v22, v14
+; GFX10-NEXT:    v_xor3_b32 v0, v0, v16, v8
+; GFX10-NEXT:    v_xor3_b32 v1, v1, v17, v9
+; GFX10-NEXT:    v_xor3_b32 v4, v4, v20, v12
+; GFX10-NEXT:    v_xor3_b32 v5, v5, v21, v13
+; GFX10-NEXT:    v_xor3_b32 v2, v2, v10, v6
+; GFX10-NEXT:    v_xor3_b32 v0, v0, v4, v2
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX10-NEXT:    v_xor3_b32 v7, v7, v23, v15
+; GFX10-NEXT:    v_xor3_b32 v3, v3, v11, v7
+; GFX10-NEXT:    v_xor3_b32 v1, v1, v5, v3
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_vector_reduce_xor_v16i64:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v31, off, s32
+; GFX11-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX11-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX11-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX11-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX11-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX11-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX11-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX11-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX11-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX11-NEXT:    v_xor3_b32 v6, v6, v22, v14
+; GFX11-NEXT:    v_xor3_b32 v0, v0, v16, v8
+; GFX11-NEXT:    v_xor3_b32 v1, v1, v17, v9
+; GFX11-NEXT:    v_xor3_b32 v4, v4, v20, v12
+; GFX11-NEXT:    v_xor3_b32 v5, v5, v21, v13
+; GFX11-NEXT:    v_xor3_b32 v2, v2, v10, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_xor3_b32 v0, v0, v4, v2
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX11-NEXT:    v_xor3_b32 v7, v7, v23, v15
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_xor3_b32 v3, v3, v11, v7
+; GFX11-NEXT:    v_xor3_b32 v1, v1, v5, v3
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_vector_reduce_xor_v16i64:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v31, off, s32
+; GFX12-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX12-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX12-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX12-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX12-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX12-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX12-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX12-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX12-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX12-NEXT:    v_xor3_b32 v6, v6, v22, v14
+; GFX12-NEXT:    v_xor3_b32 v0, v0, v16, v8
+; GFX12-NEXT:    v_xor3_b32 v1, v1, v17, v9
+; GFX12-NEXT:    v_xor3_b32 v4, v4, v20, v12
+; GFX12-NEXT:    v_xor3_b32 v5, v5, v21, v13
+; GFX12-NEXT:    v_xor3_b32 v2, v2, v10, v6
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_xor3_b32 v0, v0, v4, v2
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX12-NEXT:    v_xor3_b32 v7, v7, v23, v15
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT:    v_xor3_b32 v3, v3, v11, v7
+; GFX12-NEXT:    v_xor3_b32 v1, v1, v5, v3
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %res = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> %v)
+  ret i64 %res
+}
+
+; Declarations of the llvm.vector.reduce.xor intrinsic variants exercised by the
+; tests above, grouped by element type (i8/i16/i32/i64) and element count.
+declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.xor.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.xor.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.xor.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>)
More information about the llvm-commits
mailing list