[llvm] [SPIRV] Add vector reduction instructions (PR #82786)
Vyacheslav Levytskyy via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 23 12:30:47 PST 2024
https://github.com/VyacheslavLevytskyy updated https://github.com/llvm/llvm-project/pull/82786
>From df1312ce79e3b44bb76f628fb55a05ca522c7032 Mon Sep 17 00:00:00 2001
From: "Levytskyy, Vyacheslav" <vyacheslav.levytskyy at intel.com>
Date: Fri, 23 Feb 2024 08:20:24 -0800
Subject: [PATCH 1/3] add vector reduction instructions
---
llvm/lib/Target/SPIRV/CMakeLists.txt | 1 +
llvm/lib/Target/SPIRV/SPIRV.h | 2 +
.../Target/SPIRV/SPIRVInstructionSelector.cpp | 49 +++++-
llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 25 +++
llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp | 146 ++++++++++++++++++
llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 98 ++++++------
llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp | 1 +
7 files changed, 269 insertions(+), 53 deletions(-)
create mode 100644 llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
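In short, this first patch teaches the legalizer to lower the G_VECREDUCE_*
generic opcodes and adds a post-legalizer pass that assigns SPIR-V types to
the registers the lowering creates. As a minimal sketch of the kind of input
this enables (the function name is hypothetical; canonical .v4i32-style
intrinsic mangling is used here, while the float tests in the third commit
spell suffixes as .v2float):

; Reductions arrive as calls to llvm.vector.reduce.* and, after the
; legalizer's lower() step, become chains of extracts and scalar ops.
define spir_func i32 @reduce_add_sketch(<4 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
  ret i32 %r
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)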
diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt
index d1ada45d17a5bc..afc26dda4c68bd 100644
--- a/llvm/lib/Target/SPIRV/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -29,6 +29,7 @@ add_llvm_target(SPIRVCodeGen
SPIRVMetadata.cpp
SPIRVModuleAnalysis.cpp
SPIRVPreLegalizer.cpp
+ SPIRVPostLegalizer.cpp
SPIRVPrepareFunctions.cpp
SPIRVRegisterBankInfo.cpp
SPIRVRegisterInfo.cpp
diff --git a/llvm/lib/Target/SPIRV/SPIRV.h b/llvm/lib/Target/SPIRV/SPIRV.h
index 9460b0808cae89..6979107349d968 100644
--- a/llvm/lib/Target/SPIRV/SPIRV.h
+++ b/llvm/lib/Target/SPIRV/SPIRV.h
@@ -23,6 +23,7 @@ ModulePass *createSPIRVPrepareFunctionsPass(const SPIRVTargetMachine &TM);
FunctionPass *createSPIRVStripConvergenceIntrinsicsPass();
FunctionPass *createSPIRVRegularizerPass();
FunctionPass *createSPIRVPreLegalizerPass();
+FunctionPass *createSPIRVPostLegalizerPass();
FunctionPass *createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM);
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
@@ -32,6 +33,7 @@ createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
void initializeSPIRVModuleAnalysisPass(PassRegistry &);
void initializeSPIRVConvergenceRegionAnalysisWrapperPassPass(PassRegistry &);
void initializeSPIRVPreLegalizerPass(PassRegistry &);
+void initializeSPIRVPostLegalizerPass(PassRegistry &);
void initializeSPIRVEmitIntrinsicsPass(PassRegistry &);
} // namespace llvm
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 7258d3b4d88ed3..6987d54e2b176d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -183,6 +183,8 @@ class SPIRVInstructionSelector : public InstructionSelector {
bool selectLog10(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
+ bool selectUnmergeValues(MachineInstr &I) const;
+
Register buildI32Constant(uint32_t Val, MachineInstr &I,
const SPIRVType *ResType = nullptr) const;
@@ -235,7 +237,7 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) {
if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
if (isTypeFoldingSupported(Def->getOpcode())) {
- auto Res = selectImpl(I, *CoverageInfo);
+ bool Res = selectImpl(I, *CoverageInfo);
assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
if (Res)
return Res;
@@ -263,7 +265,8 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) {
assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
if (spvSelect(ResVReg, ResType, I)) {
if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
- MRI->setType(ResVReg, LLT::scalar(32));
+ for (unsigned i = 0; i < I.getNumDefs(); ++i)
+ MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
I.removeFromParent();
return true;
}
@@ -273,9 +276,9 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) {
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
- assert(!isTypeFoldingSupported(I.getOpcode()) ||
- I.getOpcode() == TargetOpcode::G_CONSTANT);
const unsigned Opcode = I.getOpcode();
+ if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
+ return selectImpl(I, *CoverageInfo);
switch (Opcode) {
case TargetOpcode::G_CONSTANT:
return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
@@ -504,6 +507,9 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
case TargetOpcode::G_FENCE:
return selectFence(I);
+ case TargetOpcode::G_UNMERGE_VALUES:
+ return selectUnmergeValues(I);
+
default:
return false;
}
@@ -733,6 +739,41 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
return Result;
}
+bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
+ unsigned ArgI = I.getNumOperands() - 1;
+ Register SrcReg =
+ I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
+ SPIRVType *DefType =
+ SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
+ if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
+ report_fatal_error(
+ "cannot select G_UNMERGE_VALUES with a non-vector argument");
+
+ SPIRVType *ScalarType =
+ GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
+ MachineBasicBlock &BB = *I.getParent();
+ bool Res = false;
+ for (unsigned i = 0; i < I.getNumDefs(); ++i) {
+ Register ResVReg = I.getOperand(i).getReg();
+ SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
+ if (!ResType) {
+ // There were no "assign type" actions; fix this now.
+ ResType = ScalarType;
+ MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
+ MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
+ GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
+ }
+ auto MIB =
+ BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(SrcReg)
+ .addImm(static_cast<int64_t>(i));
+ Res |= MIB.constrainAllUses(TII, TRI, RBI);
+ }
+ return Res;
+}
+
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
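For reference, selectUnmergeValues above turns one G_UNMERGE_VALUES into one
OpCompositeExtract per scalar result. A hedged sketch of the correspondence
(hypothetical function name; compare the v3 CHECK lines in add.ll below):

; For the <3 x i32> case the reduction is expanded sequentially; the
; G_UNMERGE_VALUES of the source vector selects to one OpCompositeExtract
; per element, followed by two scalar OpIAdds.
define spir_func i32 @unmerge_sketch(<3 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %v)
  ret i32 %r
}

declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32>)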
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 4f2e7a240fc2cc..b6fd4fd2d8b800 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -113,6 +113,13 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16,
v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
+ auto allVectors = {
+ v2s1, v2s8, v2s16, v2s32, v2s64,
+ v3s1, v3s8, v3s16, v3s32, v3s64,
+ v4s1, v4s8, v4s16, v4s32, v4s64,
+ v8s1, v8s8, v8s16, v8s32, v8s64,
+ v16s1, v16s8, v16s16, v16s32, v16s64};
+
auto allScalarsAndVectors = {
s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64,
v3s1, v3s8, v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64,
@@ -146,6 +153,24 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
// TODO: add proper rules for vectors legalization.
getActionDefinitionsBuilder({G_BUILD_VECTOR, G_SHUFFLE_VECTOR}).alwaysLegal();
+ // Vector Reduction Operations
+ getActionDefinitionsBuilder(
+ {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX,
+ G_VECREDUCE_ADD, G_VECREDUCE_MUL, G_VECREDUCE_FMUL, G_VECREDUCE_FMIN,
+ G_VECREDUCE_FMAX, G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM,
+ G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
+ .legalFor(allVectors)
+ .scalarize(1)
+ .lower();
+
+ getActionDefinitionsBuilder({G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
+ .scalarize(2)
+ .lower();
+
+ // Merge/Unmerge
+ // TODO: add proper legalization rules.
+ getActionDefinitionsBuilder(G_UNMERGE_VALUES).alwaysLegal();
+
getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE})
.legalIf(all(typeInSet(0, allWritablePtrs), typeInSet(1, allPtrs)));
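A note on the type indices in these rules, as I read the generic opcode
definitions: for the plain G_VECREDUCE_* opcodes type index 1 is the vector
source, hence scalarize(1); the sequential G_VECREDUCE_SEQ_FADD/FMUL carry a
scalar start value at type index 1 and the vector source at index 2, hence
scalarize(2). A sketch of the sequential form (hypothetical name):

; The sequential form threads a scalar start value through the reduction
; in order: result = ((start + v0) + v1), so it is only lowered, never
; reassociated.
define spir_func float @seq_fadd_sketch(float %start, <2 x float> %v) {
entry:
  %r = call float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %v)
  ret float %r
}

declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)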
diff --git a/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
new file mode 100644
index 00000000000000..186dc3441327f0
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
@@ -0,0 +1,146 @@
+//===-- SPIRVPostLegalizer.cpp - amend info after legalization -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass partially applies pre-legalization logic to new instructions
+// inserted as a result of legalization:
+// - assigns SPIR-V types to registers for new instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPIRV.h"
+#include "SPIRVSubtarget.h"
+#include "SPIRVUtils.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/IntrinsicsSPIRV.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
+
+#define DEBUG_TYPE "spirv-postlegalizer"
+
+using namespace llvm;
+
+namespace {
+class SPIRVPostLegalizer : public MachineFunctionPass {
+public:
+ static char ID;
+ SPIRVPostLegalizer() : MachineFunctionPass(ID) {
+ initializeSPIRVPostLegalizerPass(*PassRegistry::getPassRegistry());
+ }
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // namespace
+
+// Defined in SPIRVLegalizerInfo.cpp.
+extern bool isTypeFoldingSupported(unsigned Opcode);
+
+namespace llvm {
+// Defined in SPIRVPreLegalizer.cpp.
+extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
+ SPIRVGlobalRegistry *GR,
+ MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI);
+extern void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR);
+} // namespace llvm
+
+static bool isMetaInstrGET(unsigned Opcode) {
+ return Opcode == SPIRV::GET_ID || Opcode == SPIRV::GET_fID ||
+ Opcode == SPIRV::GET_pID || Opcode == SPIRV::GET_vID ||
+ Opcode == SPIRV::GET_vfID;
+}
+
+static void processNewInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+ MachineIRBuilder MIB) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &I : MBB) {
+ const unsigned Opcode = I.getOpcode();
+ if (Opcode == TargetOpcode::G_UNMERGE_VALUES) {
+ unsigned ArgI = I.getNumOperands() - 1;
+ Register SrcReg = I.getOperand(ArgI).isReg()
+ ? I.getOperand(ArgI).getReg()
+ : Register(0);
+ SPIRVType *DefType =
+ SrcReg.isValid() ? GR->getSPIRVTypeForVReg(SrcReg) : nullptr;
+ if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
+ report_fatal_error(
+ "cannot select G_UNMERGE_VALUES with a non-vector argument");
+ SPIRVType *ScalarType =
+ GR->getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
+ for (unsigned i = 0; i < I.getNumDefs(); ++i) {
+ Register ResVReg = I.getOperand(i).getReg();
+ SPIRVType *ResType = GR->getSPIRVTypeForVReg(ResVReg);
+ if (!ResType) {
+ // There were no "assign type" actions; fix this now.
+ ResType = ScalarType;
+ MRI.setRegClass(ResVReg, &SPIRV::IDRegClass);
+ MRI.setType(ResVReg,
+ LLT::scalar(GR->getScalarOrVectorBitWidth(ResType)));
+ GR->assignSPIRVTypeToVReg(ResType, ResVReg, *GR->CurMF);
+ }
+ }
+ } else if (isTypeFoldingSupported(Opcode) && I.getNumDefs() == 1 &&
+ I.getNumOperands() > 1 && I.getOperand(1).isReg()) {
+ Register ResVReg = I.getOperand(0).getReg();
+ SPIRVType *ResVType = GR->getSPIRVTypeForVReg(ResVReg);
+ // Check if the register defined by the instruction is newly generated
+ // or already processed
+ if (!ResVType) {
+ // Set type of the defined register
+ ResVType = GR->getSPIRVTypeForVReg(I.getOperand(1).getReg());
+ // Check if we have a type defined for operands of the new instruction
+ if (!ResVType)
+ continue;
+ // Set type & class
+ MRI.setRegClass(ResVReg, &SPIRV::IDRegClass);
+ MRI.setType(ResVReg,
+ LLT::scalar(GR->getScalarOrVectorBitWidth(ResVType)));
+ GR->assignSPIRVTypeToVReg(ResVType, ResVReg, *GR->CurMF);
+ }
+ // Check if the instruction is newly generated or already processed
+ MachineInstr *NextMI = I.getNextNode();
+ if (NextMI && isMetaInstrGET(NextMI->getOpcode()))
+ continue;
+ // Restore the usual instruction pattern for the newly inserted instruction
+ MRI.setRegClass(ResVReg, MRI.getType(ResVReg).isVector()
+ ? &SPIRV::IDRegClass
+ : &SPIRV::ANYIDRegClass);
+ MRI.setType(ResVReg, LLT::scalar(32));
+ insertAssignInstr(ResVReg, nullptr, ResVType, GR, MIB, MRI);
+ processInstr(I, MIB, MRI, GR);
+ }
+ }
+ }
+}
+
+bool SPIRVPostLegalizer::runOnMachineFunction(MachineFunction &MF) {
+ // Initialize the type registry.
+ const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
+ SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
+ GR->setCurrentFunc(MF);
+ MachineIRBuilder MIB(MF);
+
+ processNewInstrs(MF, GR, MIB);
+
+ return true;
+}
+
+INITIALIZE_PASS(SPIRVPostLegalizer, DEBUG_TYPE, "SPIRV post legalizer", false,
+ false)
+
+char SPIRVPostLegalizer::ID = 0;
+
+FunctionPass *llvm::createSPIRVPostLegalizerPass() {
+ return new SPIRVPostLegalizer();
+}
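The new pass exists because instructions created during legalization never
pass through the IRTranslator or the pre-legalizer, so their virtual
registers have no SPIR-V type attached. A sketch of a trigger case
(hypothetical name; the exact generic opcodes produced by the lowering are
an assumption here):

; Lowering this call inserts fresh extract/add instructions after the
; legalizer has run; processNewInstrs() gives their result registers the
; scalar SPIR-V type taken from the source vector's element type.
define spir_func i16 @postlegalize_sketch(<8 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v)
  ret i16 %r
}

declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)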
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 144216896eb68c..1e92e5ce264f04 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -212,6 +212,34 @@ static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
return SpirvTy;
}
+static std::pair<Register, unsigned>
+createNewIdReg(Register ValReg, unsigned Opcode, MachineRegisterInfo &MRI,
+ const SPIRVGlobalRegistry &GR) {
+ LLT NewT = LLT::scalar(32);
+ SPIRVType *SpvType = GR.getSPIRVTypeForVReg(ValReg);
+ assert(SpvType && "VReg is expected to have SPIRV type");
+ bool IsFloat = SpvType->getOpcode() == SPIRV::OpTypeFloat;
+ bool IsVectorFloat =
+ SpvType->getOpcode() == SPIRV::OpTypeVector &&
+ GR.getSPIRVTypeForVReg(SpvType->getOperand(1).getReg())->getOpcode() ==
+ SPIRV::OpTypeFloat;
+ IsFloat |= IsVectorFloat;
+ auto GetIdOp = IsFloat ? SPIRV::GET_fID : SPIRV::GET_ID;
+ auto DstClass = IsFloat ? &SPIRV::fIDRegClass : &SPIRV::IDRegClass;
+ if (MRI.getType(ValReg).isPointer()) {
+ NewT = LLT::pointer(0, 32);
+ GetIdOp = SPIRV::GET_pID;
+ DstClass = &SPIRV::pIDRegClass;
+ } else if (MRI.getType(ValReg).isVector()) {
+ NewT = LLT::fixed_vector(2, NewT);
+ GetIdOp = IsFloat ? SPIRV::GET_vfID : SPIRV::GET_vID;
+ DstClass = IsFloat ? &SPIRV::vfIDRegClass : &SPIRV::vIDRegClass;
+ }
+ Register IdReg = MRI.createGenericVirtualRegister(NewT);
+ MRI.setRegClass(IdReg, DstClass);
+ return {IdReg, GetIdOp};
+}
+
// Insert ASSIGN_TYPE instruction between Reg and its definition, set NewReg as
// a dst of the definition, assign SPIRVType to both registers. If SpirvTy is
// provided, use it as SPIRVType in ASSIGN_TYPE, otherwise create it from Ty.
@@ -249,6 +277,27 @@ Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
Def->getOperand(0).setReg(NewReg);
return NewReg;
}
+
+void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR) {
+ unsigned Opc = MI.getOpcode();
+ assert(MI.getNumDefs() > 0 && MRI.hasOneUse(MI.getOperand(0).getReg()));
+ MachineInstr &AssignTypeInst =
+ *(MRI.use_instr_begin(MI.getOperand(0).getReg()));
+ auto NewReg = createNewIdReg(MI.getOperand(0).getReg(), Opc, MRI, *GR).first;
+ AssignTypeInst.getOperand(1).setReg(NewReg);
+ MI.getOperand(0).setReg(NewReg);
+ MIB.setInsertPt(*MI.getParent(),
+ (MI.getNextNode() ? MI.getNextNode()->getIterator()
+ : MI.getParent()->end()));
+ for (auto &Op : MI.operands()) {
+ if (!Op.isReg() || Op.isDef())
+ continue;
+ auto IdOpInfo = createNewIdReg(Op.getReg(), Opc, MRI, *GR);
+ MIB.buildInstr(IdOpInfo.second).addDef(IdOpInfo.first).addUse(Op.getReg());
+ Op.setReg(IdOpInfo.first);
+ }
+}
} // namespace llvm
static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
@@ -345,55 +394,6 @@ static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MI->eraseFromParent();
}
-static std::pair<Register, unsigned>
-createNewIdReg(Register ValReg, unsigned Opcode, MachineRegisterInfo &MRI,
- const SPIRVGlobalRegistry &GR) {
- LLT NewT = LLT::scalar(32);
- SPIRVType *SpvType = GR.getSPIRVTypeForVReg(ValReg);
- assert(SpvType && "VReg is expected to have SPIRV type");
- bool IsFloat = SpvType->getOpcode() == SPIRV::OpTypeFloat;
- bool IsVectorFloat =
- SpvType->getOpcode() == SPIRV::OpTypeVector &&
- GR.getSPIRVTypeForVReg(SpvType->getOperand(1).getReg())->getOpcode() ==
- SPIRV::OpTypeFloat;
- IsFloat |= IsVectorFloat;
- auto GetIdOp = IsFloat ? SPIRV::GET_fID : SPIRV::GET_ID;
- auto DstClass = IsFloat ? &SPIRV::fIDRegClass : &SPIRV::IDRegClass;
- if (MRI.getType(ValReg).isPointer()) {
- NewT = LLT::pointer(0, 32);
- GetIdOp = SPIRV::GET_pID;
- DstClass = &SPIRV::pIDRegClass;
- } else if (MRI.getType(ValReg).isVector()) {
- NewT = LLT::fixed_vector(2, NewT);
- GetIdOp = IsFloat ? SPIRV::GET_vfID : SPIRV::GET_vID;
- DstClass = IsFloat ? &SPIRV::vfIDRegClass : &SPIRV::vIDRegClass;
- }
- Register IdReg = MRI.createGenericVirtualRegister(NewT);
- MRI.setRegClass(IdReg, DstClass);
- return {IdReg, GetIdOp};
-}
-
-static void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
- MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR) {
- unsigned Opc = MI.getOpcode();
- assert(MI.getNumDefs() > 0 && MRI.hasOneUse(MI.getOperand(0).getReg()));
- MachineInstr &AssignTypeInst =
- *(MRI.use_instr_begin(MI.getOperand(0).getReg()));
- auto NewReg = createNewIdReg(MI.getOperand(0).getReg(), Opc, MRI, *GR).first;
- AssignTypeInst.getOperand(1).setReg(NewReg);
- MI.getOperand(0).setReg(NewReg);
- MIB.setInsertPt(*MI.getParent(),
- (MI.getNextNode() ? MI.getNextNode()->getIterator()
- : MI.getParent()->end()));
- for (auto &Op : MI.operands()) {
- if (!Op.isReg() || Op.isDef())
- continue;
- auto IdOpInfo = createNewIdReg(Op.getReg(), Opc, MRI, *GR);
- MIB.buildInstr(IdOpInfo.second).addDef(IdOpInfo.first).addUse(Op.getReg());
- Op.setReg(IdOpInfo.first);
- }
-}
-
// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
index e1b7bdd3140dbe..fbf64f2b1dfb13 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -189,6 +189,7 @@ void SPIRVPassConfig::addPreLegalizeMachineIR() {
// Use the default legalizer.
bool SPIRVPassConfig::addLegalizeMachineIR() {
addPass(new Legalizer());
+ addPass(createSPIRVPostLegalizerPass());
return false;
}
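With the pass registered directly after Legalizer, the ordering can be
inspected with llc's -print-after-all flag; a debugging sketch, not part of
the patch (triple and -O0 follow the RUN lines of the tests below):

; Probe input for inspecting the pass pipeline, e.g. with
;   llc -O0 -mtriple=spirv64-unknown-unknown -print-after-all probe.ll
define spir_func i32 @pipeline_probe(<2 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
  ret i32 %r
}

declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)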
>From dcdbff13eb5b25801d716f3f978ec337dc11cd36 Mon Sep 17 00:00:00 2001
From: "Levytskyy, Vyacheslav" <vyacheslav.levytskyy at intel.com>
Date: Fri, 23 Feb 2024 09:53:19 -0800
Subject: [PATCH 2/3] more supported instructions
---
llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 10 ++--
llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp | 48 +++++++++++++++-----
2 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index b6fd4fd2d8b800..c3f75463dfd23e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -113,12 +113,10 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16,
v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
- auto allVectors = {
- v2s1, v2s8, v2s16, v2s32, v2s64,
- v3s1, v3s8, v3s16, v3s32, v3s64,
- v4s1, v4s8, v4s16, v4s32, v4s64,
- v8s1, v8s8, v8s16, v8s32, v8s64,
- v16s1, v16s8, v16s16, v16s32, v16s64};
+ auto allVectors = {v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8,
+ v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32,
+ v4s64, v8s1, v8s8, v8s16, v8s32, v8s64, v16s1,
+ v16s8, v16s16, v16s32, v16s64};
auto allScalarsAndVectors = {
s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64,
diff --git a/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
index 186dc3441327f0..da24c779ffe066 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
@@ -59,6 +59,22 @@ static bool isMetaInstrGET(unsigned Opcode) {
Opcode == SPIRV::GET_vfID;
}
+static bool mayBeInserted(unsigned Opcode) {
+ switch (Opcode) {
+ case TargetOpcode::G_SMAX:
+ case TargetOpcode::G_UMAX:
+ case TargetOpcode::G_SMIN:
+ case TargetOpcode::G_UMIN:
+ case TargetOpcode::G_FMINNUM:
+ case TargetOpcode::G_FMINIMUM:
+ case TargetOpcode::G_FMAXNUM:
+ case TargetOpcode::G_FMAXIMUM:
+ return true;
+ default:
+ return isTypeFoldingSupported(Opcode);
+ }
+}
+
static void processNewInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MachineIRBuilder MIB) {
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -90,8 +106,11 @@ static void processNewInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
GR->assignSPIRVTypeToVReg(ResType, ResVReg, *GR->CurMF);
}
}
- } else if (isTypeFoldingSupported(Opcode) && I.getNumDefs() == 1 &&
+ } else if (mayBeInserted(Opcode) && I.getNumDefs() == 1 &&
I.getNumOperands() > 1 && I.getOperand(1).isReg()) {
+ // The legalizer may have added new instructions and introduced new
+ // registers; we must decorate them as if they were introduced in a
+ // non-automatic way.
Register ResVReg = I.getOperand(0).getReg();
SPIRVType *ResVType = GR->getSPIRVTypeForVReg(ResVReg);
// Check if the register defined by the instruction is newly generated
@@ -108,17 +127,22 @@ static void processNewInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
LLT::scalar(GR->getScalarOrVectorBitWidth(ResVType)));
GR->assignSPIRVTypeToVReg(ResVType, ResVReg, *GR->CurMF);
}
- // Check if the instruction newly generated or already processed
- MachineInstr *NextMI = I.getNextNode();
- if (NextMI && isMetaInstrGET(NextMI->getOpcode()))
- continue;
- // Restore usual instructions pattern for the newly inserted instruction
- MRI.setRegClass(ResVReg, MRI.getType(ResVReg).isVector()
- ? &SPIRV::IDRegClass
- : &SPIRV::ANYIDRegClass);
- MRI.setType(ResVReg, LLT::scalar(32));
- insertAssignInstr(ResVReg, nullptr, ResVType, GR, MIB, MRI);
- processInstr(I, MIB, MRI, GR);
+ // If this is a simple operation that is to be folded by a TableGen
+ // definition, we must apply some of the pre-legalizer rules here.
+ if (isTypeFoldingSupported(Opcode)) {
+ // Check if the instruction is newly generated or already processed
+ MachineInstr *NextMI = I.getNextNode();
+ if (NextMI && isMetaInstrGET(NextMI->getOpcode()))
+ continue;
+ // Restore the usual instruction pattern for the newly inserted
+ // instruction.
+ MRI.setRegClass(ResVReg, MRI.getType(ResVReg).isVector()
+ ? &SPIRV::IDRegClass
+ : &SPIRV::ANYIDRegClass);
+ MRI.setType(ResVReg, LLT::scalar(32));
+ insertAssignInstr(ResVReg, nullptr, ResVType, GR, MIB, MRI);
+ processInstr(I, MIB, MRI, GR);
+ }
}
}
}
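Patch 2 widens the post-legalizer to the min/max opcodes because lowering
G_VECREDUCE_SMIN and friends emits scalar G_SMIN/G_SMAX/G_FMINNUM-style
instructions that are not TableGen-folded yet still need SPIR-V types. A
sketch of one such input (hypothetical name; by analogy with the fmax CHECK
lines below, these scalar ops presumably select to OpExtInst s_max from the
OpenCL extended set, but that mapping is an assumption):

; Lowering produces a chain of scalar G_SMAX instructions; mayBeInserted()
; lets the post-legalizer assign types to their result registers.
define spir_func i32 @reduce_smax_sketch(<3 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.smax.v3i32(<3 x i32> %v)
  ret i32 %r
}

declare i32 @llvm.vector.reduce.smax.v3i32(<3 x i32>)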
>From 5c57b3f8d9e4cd231b9e51a8e808e5392b179593 Mon Sep 17 00:00:00 2001
From: "Levytskyy, Vyacheslav" <vyacheslav.levytskyy at intel.com>
Date: Fri, 23 Feb 2024 12:30:36 -0800
Subject: [PATCH 3/3] add tests
---
.../llvm-intrinsics/llvm-vector-reduce/add.ll | 233 ++++++++++++++++++
.../llvm-intrinsics/llvm-vector-reduce/and.ll | 233 ++++++++++++++++++
.../llvm-vector-reduce/fadd.ll | 189 ++++++++++++++
.../llvm-vector-reduce/fmax.ll | 177 +++++++++++++
.../llvm-vector-reduce/fmaximum.ll | 177 +++++++++++++
.../llvm-vector-reduce/fmin.ll | 176 +++++++++++++
.../llvm-vector-reduce/fminimum.ll | 177 +++++++++++++
.../llvm-vector-reduce/fmul.ll | 189 ++++++++++++++
.../llvm-intrinsics/llvm-vector-reduce/mul.ll | 232 +++++++++++++++++
.../llvm-intrinsics/llvm-vector-reduce/or.ll | 233 ++++++++++++++++++
.../llvm-vector-reduce/smax.ll | 233 ++++++++++++++++++
.../llvm-vector-reduce/smin.ll | 233 ++++++++++++++++++
.../llvm-vector-reduce/umax.ll | 233 ++++++++++++++++++
.../llvm-vector-reduce/umin.ll | 233 ++++++++++++++++++
.../llvm-intrinsics/llvm-vector-reduce/xor.ll | 233 ++++++++++++++++++
15 files changed, 3181 insertions(+)
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
create mode 100644 llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
new file mode 100644
index 00000000000000..4887e6e529d23e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpIAdd %[[CharVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpIAdd %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpIAdd %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpIAdd %[[ShortVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpIAdd %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpIAdd %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpIAdd %[[IntVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpIAdd %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpIAdd %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpIAdd %[[LongVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpIAdd %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpIAdd %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_add_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_add_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.add.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_add_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_add_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_add_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_add_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_add_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.add.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_add_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_add_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_add_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_add_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_add_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_add_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_add_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_add_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_add_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_add_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.add.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_add_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_add_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_add_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.add.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.add.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
new file mode 100644
index 00000000000000..36cc95843df916
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[CharVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[ShortVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[IntVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[LongVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Added1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_and_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_and_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_and_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_and_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_and_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_and_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.and.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_and_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_and_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_and_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_and_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_and_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_and_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_and_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_and_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_and_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_and_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.and.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_and_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_and_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_and_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.and.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.and.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.and.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
new file mode 100644
index 00000000000000..c2ceb56aba6fe3
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
@@ -0,0 +1,189 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Half:.*]] = OpFunctionParameter %[[Half]]
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfR1:.*]] = OpFAdd %[[Half]] %[[Param2Half]] %[[Vec2HalfItem0]]
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR2:.*]] = OpFAdd %[[Half]] %[[Vec2HalfR1]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Half:.*]] = OpFunctionParameter %[[Half]]
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfR1:.*]] = OpFAdd %[[Half]] %[[Param2Half]] %[[Vec3HalfItem0]]
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfR2:.*]] = OpFAdd %[[Half]] %[[Vec3HalfR1]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR3:.*]] = OpFAdd %[[Half]] %[[Vec3HalfR2]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR3]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Float:.*]] = OpFunctionParameter %[[Float]]
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatR1:.*]] = OpFAdd %[[Float]] %[[Param2Float]] %[[Vec2FloatItem0]]
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR2:.*]] = OpFAdd %[[Float]] %[[Vec2FloatR1]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Float:.*]] = OpFunctionParameter %[[Float]]
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatR1:.*]] = OpFAdd %[[Float]] %[[Param2Float]] %[[Vec3FloatItem0]]
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatR2:.*]] = OpFAdd %[[Float]] %[[Vec3FloatR1]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR3:.*]] = OpFAdd %[[Float]] %[[Vec3FloatR2]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR3]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Double:.*]] = OpFunctionParameter %[[Double]]
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleR1:.*]] = OpFAdd %[[Double]] %[[Param2Double]] %[[Vec2DoubleItem0]]
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR2:.*]] = OpFAdd %[[Double]] %[[Vec2DoubleR1]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Double:.*]] = OpFunctionParameter %[[Double]]
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleR1:.*]] = OpFAdd %[[Double]] %[[Param2Double]] %[[Vec3DoubleItem0]]
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleR2:.*]] = OpFAdd %[[Double]] %[[Vec3DoubleR1]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR3:.*]] = OpFAdd %[[Double]] %[[Vec3DoubleR2]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR3]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fadd_v2half(half %sp, <2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fadd.v2half(half %sp, <2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fadd_v3half(half %sp, <3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fadd.v3half(half %sp, <3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fadd_v4half(half %sp, <4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fadd.v4half(half %sp, <4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fadd_v8half(half %sp, <8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fadd.v8half(half %sp, <8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fadd_v16half(half %sp, <16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fadd.v16half(half %sp, <16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fadd_v2float(float %sp, <2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fadd.v2float(float %sp, <2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fadd_v3float(float %sp, <3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fadd.v3float(float %sp, <3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fadd_v4float(float %sp, <4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fadd.v4float(float %sp, <4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fadd_v8float(float %sp, <8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fadd.v8float(float %sp, <8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fadd_v16float(float %sp, <16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fadd.v16float(float %sp, <16 x float> %v)
+ ret float %0
+}
+
+
+define spir_func double @test_vector_reduce_fadd_v2double(double %sp, <2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fadd.v2double(double %sp, <2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fadd_v3double(double %sp, <3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fadd.v3double(double %sp, <3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fadd_v4double(double %sp, <4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fadd.v4double(double %sp, <4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fadd_v8double(double %sp, <8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fadd.v8double(double %sp, <8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fadd_v16double(double %sp, <16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fadd.v16double(double %sp, <16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fadd.v2half(half, <2 x half>)
+declare half @llvm.vector.reduce.fadd.v3half(half, <3 x half>)
+declare half @llvm.vector.reduce.fadd.v4half(half, <4 x half>)
+declare half @llvm.vector.reduce.fadd.v8half(half, <8 x half>)
+declare half @llvm.vector.reduce.fadd.v16half(half, <16 x half>)
+declare float @llvm.vector.reduce.fadd.v2float(float, <2 x float>)
+declare float @llvm.vector.reduce.fadd.v3float(float, <3 x float>)
+declare float @llvm.vector.reduce.fadd.v4float(float, <4 x float>)
+declare float @llvm.vector.reduce.fadd.v8float(float, <8 x float>)
+declare float @llvm.vector.reduce.fadd.v16float(float, <16 x float>)
+declare double @llvm.vector.reduce.fadd.v2double(double, <2 x double>)
+declare double @llvm.vector.reduce.fadd.v3double(double, <3 x double>)
+declare double @llvm.vector.reduce.fadd.v4double(double, <4 x double>)
+declare double @llvm.vector.reduce.fadd.v8double(double, <8 x double>)
+declare double @llvm.vector.reduce.fadd.v16double(double, <16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
new file mode 100644
index 00000000000000..543f224bbd054f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
@@ -0,0 +1,177 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec2HalfItem0]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec3HalfItem0]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfR2:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec3HalfR1]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec2FloatItem0]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec3FloatItem0]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatR2:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec3FloatR1]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec2DoubleItem0]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec3DoubleItem0]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleR2:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec3DoubleR1]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fmax_v2half(<2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmax.v2half(<2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmax_v3half(<3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmax.v3half(<3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmax_v4half(<4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmax.v4half(<4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmax_v8half(<8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmax.v8half(<8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmax_v16half(<16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmax.v16half(<16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fmax_v2float(<2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmax.v2float(<2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmax_v3float(<3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmax.v3float(<3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmax_v4float(<4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmax.v4float(<4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmax_v8float(<8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmax.v8float(<8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmax_v16float(<16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmax.v16float(<16 x float> %v)
+ ret float %0
+}
+
+define spir_func double @test_vector_reduce_fmax_v2double(<2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmax.v2double(<2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmax_v3double(<3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmax.v3double(<3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmax_v4double(<4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmax.v4double(<4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmax_v8double(<8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmax.v8double(<8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmax_v16double(<16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmax.v16double(<16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fmax.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fmax.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmax.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmax.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmax.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmax.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmax.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmax.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmax.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmax.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmax.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmax.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmax.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmax.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmax.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
new file mode 100644
index 00000000000000..4e537212926d80
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
@@ -0,0 +1,179 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
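+; Expected lowering (checked below): each llvm.vector.reduce.fmaximum call is
+; expanded into per-element OpCompositeExtract plus a chain of OpExtInst fmax.
+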
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec2HalfItem0]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec3HalfItem0]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfR2:.*]] = OpExtInst %[[Half]] %[[#]] fmax %[[Vec3HalfR1]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec2FloatItem0]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec3FloatItem0]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatR2:.*]] = OpExtInst %[[Float]] %[[#]] fmax %[[Vec3FloatR1]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec2DoubleItem0]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec3DoubleItem0]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleR2:.*]] = OpExtInst %[[Double]] %[[#]] fmax %[[Vec3DoubleR1]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fmaximum_v2half(<2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmaximum.v2half(<2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmaximum_v3half(<3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmaximum.v3half(<3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmaximum_v4half(<4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmaximum.v4half(<4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmaximum_v8half(<8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmaximum.v8half(<8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmaximum_v16half(<16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmaximum.v16half(<16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fmaximum_v2float(<2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmaximum.v2float(<2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmaximum_v3float(<3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmaximum.v3float(<3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmaximum_v4float(<4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmaximum.v4float(<4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmaximum_v8float(<8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmaximum.v8float(<8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmaximum_v16float(<16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmaximum.v16float(<16 x float> %v)
+ ret float %0
+}
+
+define spir_func double @test_vector_reduce_fmaximum_v2double(<2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmaximum.v2double(<2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmaximum_v3double(<3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmaximum.v3double(<3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmaximum_v4double(<4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmaximum.v4double(<4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmaximum_v8double(<8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmaximum.v8double(<8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmaximum_v16double(<16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmaximum.v16double(<16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fmaximum.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fmaximum.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmaximum.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmaximum.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmaximum.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmaximum.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmaximum.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmaximum.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmaximum.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmaximum.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmaximum.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmaximum.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmaximum.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmaximum.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmaximum.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
new file mode 100644
index 00000000000000..05c34d01171e07
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
@@ -0,0 +1,179 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
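+; Expected lowering (checked below): each llvm.vector.reduce.fmin call is
+; expanded into per-element OpCompositeExtract plus a chain of OpExtInst fmin.
+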
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec2HalfItem0]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec3HalfItem0]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfR2:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec3HalfR1]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec2FloatItem0]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec3FloatItem0]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatR2:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec3FloatR1]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec2DoubleItem0]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec3DoubleItem0]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleR2:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec3DoubleR1]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fmin_v2half(<2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmin.v2half(<2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmin_v3half(<3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmin.v3half(<3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmin_v4half(<4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmin.v4half(<4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmin_v8half(<8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmin.v8half(<8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmin_v16half(<16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmin.v16half(<16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fmin_v2float(<2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmin.v2float(<2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmin_v3float(<3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmin.v3float(<3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmin_v4float(<4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmin.v4float(<4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmin_v8float(<8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmin.v8float(<8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmin_v16float(<16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmin.v16float(<16 x float> %v)
+ ret float %0
+}
+
+define spir_func double @test_vector_reduce_fmin_v2double(<2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmin.v2double(<2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmin_v3double(<3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmin.v3double(<3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmin_v4double(<4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmin.v4double(<4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmin_v8double(<8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmin.v8double(<8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmin_v16double(<16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmin.v16double(<16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fmin.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fmin.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fmin.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fmin.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fmin.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fmin.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fmin.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fmin.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fmin.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fmin.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fmin.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fmin.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fmin.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fmin.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fmin.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
new file mode 100644
index 00000000000000..9278e6b0e43a4f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
@@ -0,0 +1,179 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
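+; Expected lowering (checked below): each llvm.vector.reduce.fminimum call is
+; expanded into per-element OpCompositeExtract plus a chain of OpExtInst fmin.
+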
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec2HalfItem0]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR1:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec3HalfItem0]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfR2:.*]] = OpExtInst %[[Half]] %[[#]] fmin %[[Vec3HalfR1]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec2FloatItem0]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR1:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec3FloatItem0]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatR2:.*]] = OpExtInst %[[Float]] %[[#]] fmin %[[Vec3FloatR1]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec2DoubleItem0]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR1]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR1:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec3DoubleItem0]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleR2:.*]] = OpExtInst %[[Double]] %[[#]] fmin %[[Vec3DoubleR1]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fminimum_v2half(<2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fminimum.v2half(<2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fminimum_v3half(<3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fminimum.v3half(<3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fminimum_v4half(<4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fminimum.v4half(<4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fminimum_v8half(<8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fminimum.v8half(<8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fminimum_v16half(<16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fminimum.v16half(<16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fminimum_v2float(<2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fminimum.v2float(<2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fminimum_v3float(<3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fminimum.v3float(<3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fminimum_v4float(<4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fminimum.v4float(<4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fminimum_v8float(<8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fminimum.v8float(<8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fminimum_v16float(<16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fminimum.v16float(<16 x float> %v)
+ ret float %0
+}
+
+define spir_func double @test_vector_reduce_fminimum_v2double(<2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fminimum.v2double(<2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fminimum_v3double(<3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fminimum.v3double(<3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fminimum_v4double(<4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fminimum.v4double(<4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fminimum_v8double(<8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fminimum.v8double(<8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fminimum_v16double(<16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fminimum.v16double(<16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fminimum.v2half(<2 x half>)
+declare half @llvm.vector.reduce.fminimum.v3half(<3 x half>)
+declare half @llvm.vector.reduce.fminimum.v4half(<4 x half>)
+declare half @llvm.vector.reduce.fminimum.v8half(<8 x half>)
+declare half @llvm.vector.reduce.fminimum.v16half(<16 x half>)
+declare float @llvm.vector.reduce.fminimum.v2float(<2 x float>)
+declare float @llvm.vector.reduce.fminimum.v3float(<3 x float>)
+declare float @llvm.vector.reduce.fminimum.v4float(<4 x float>)
+declare float @llvm.vector.reduce.fminimum.v8float(<8 x float>)
+declare float @llvm.vector.reduce.fminimum.v16float(<16 x float>)
+declare double @llvm.vector.reduce.fminimum.v2double(<2 x double>)
+declare double @llvm.vector.reduce.fminimum.v3double(<3 x double>)
+declare double @llvm.vector.reduce.fminimum.v4double(<4 x double>)
+declare double @llvm.vector.reduce.fminimum.v8double(<8 x double>)
+declare double @llvm.vector.reduce.fminimum.v16double(<16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
new file mode 100644
index 00000000000000..91b0adcccbcecd
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
@@ -0,0 +1,191 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
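+; Expected lowering (checked below): the scalar start operand is folded into the
+; first OpFMul, and the remaining elements are multiplied in a per-element chain.
+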
+; CHECK-DAG: %[[Half:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[HalfVec2:.*]] = OpTypeVector %[[Half]] 2
+; CHECK-DAG: %[[HalfVec3:.*]] = OpTypeVector %[[Half]] 3
+
+; CHECK-DAG: %[[Float:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[FloatVec2:.*]] = OpTypeVector %[[Float]] 2
+; CHECK-DAG: %[[FloatVec3:.*]] = OpTypeVector %[[Float]] 3
+
+; CHECK-DAG: %[[Double:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[DoubleVec2:.*]] = OpTypeVector %[[Double]] 2
+; CHECK-DAG: %[[DoubleVec3:.*]] = OpTypeVector %[[Double]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Half:.*]] = OpFunctionParameter %[[Half]]
+; CHECK: %[[ParamVec2Half:.*]] = OpFunctionParameter %[[HalfVec2]]
+; CHECK: %[[Vec2HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 0
+; CHECK: %[[Vec2HalfR1:.*]] = OpFMul %[[Half]] %[[Param2Half]] %[[Vec2HalfItem0]]
+; CHECK: %[[Vec2HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec2Half]] 1
+; CHECK: %[[Vec2HalfR2:.*]] = OpFMul %[[Half]] %[[Vec2HalfR1]] %[[Vec2HalfItem1]]
+; CHECK: OpReturnValue %[[Vec2HalfR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Half:.*]] = OpFunctionParameter %[[Half]]
+; CHECK: %[[ParamVec3Half:.*]] = OpFunctionParameter %[[HalfVec3]]
+; CHECK: %[[Vec3HalfItem0:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 0
+; CHECK: %[[Vec3HalfR1:.*]] = OpFMul %[[Half]] %[[Param2Half]] %[[Vec3HalfItem0]]
+; CHECK: %[[Vec3HalfItem1:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 1
+; CHECK: %[[Vec3HalfR2:.*]] = OpFMul %[[Half]] %[[Vec3HalfR1]] %[[Vec3HalfItem1]]
+; CHECK: %[[Vec3HalfItem2:.*]] = OpCompositeExtract %[[Half]] %[[ParamVec3Half]] 2
+; CHECK: %[[Vec3HalfR3:.*]] = OpFMul %[[Half]] %[[Vec3HalfR2]] %[[Vec3HalfItem2]]
+; CHECK: OpReturnValue %[[Vec3HalfR3]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Float:.*]] = OpFunctionParameter %[[Float]]
+; CHECK: %[[ParamVec2Float:.*]] = OpFunctionParameter %[[FloatVec2]]
+; CHECK: %[[Vec2FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 0
+; CHECK: %[[Vec2FloatR1:.*]] = OpFMul %[[Float]] %[[Param2Float]] %[[Vec2FloatItem0]]
+; CHECK: %[[Vec2FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec2Float]] 1
+; CHECK: %[[Vec2FloatR2:.*]] = OpFMul %[[Float]] %[[Vec2FloatR1]] %[[Vec2FloatItem1]]
+; CHECK: OpReturnValue %[[Vec2FloatR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Float:.*]] = OpFunctionParameter %[[Float]]
+; CHECK: %[[ParamVec3Float:.*]] = OpFunctionParameter %[[FloatVec3]]
+; CHECK: %[[Vec3FloatItem0:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 0
+; CHECK: %[[Vec3FloatR1:.*]] = OpFMul %[[Float]] %[[Param2Float]] %[[Vec3FloatItem0]]
+; CHECK: %[[Vec3FloatItem1:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 1
+; CHECK: %[[Vec3FloatR2:.*]] = OpFMul %[[Float]] %[[Vec3FloatR1]] %[[Vec3FloatItem1]]
+; CHECK: %[[Vec3FloatItem2:.*]] = OpCompositeExtract %[[Float]] %[[ParamVec3Float]] 2
+; CHECK: %[[Vec3FloatR3:.*]] = OpFMul %[[Float]] %[[Vec3FloatR2]] %[[Vec3FloatItem2]]
+; CHECK: OpReturnValue %[[Vec3FloatR3]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Double:.*]] = OpFunctionParameter %[[Double]]
+; CHECK: %[[ParamVec2Double:.*]] = OpFunctionParameter %[[DoubleVec2]]
+; CHECK: %[[Vec2DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 0
+; CHECK: %[[Vec2DoubleR1:.*]] = OpFMul %[[Double]] %[[Param2Double]] %[[Vec2DoubleItem0]]
+; CHECK: %[[Vec2DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec2Double]] 1
+; CHECK: %[[Vec2DoubleR2:.*]] = OpFMul %[[Double]] %[[Vec2DoubleR1]] %[[Vec2DoubleItem1]]
+; CHECK: OpReturnValue %[[Vec2DoubleR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Param2Double:.*]] = OpFunctionParameter %[[Double]]
+; CHECK: %[[ParamVec3Double:.*]] = OpFunctionParameter %[[DoubleVec3]]
+; CHECK: %[[Vec3DoubleItem0:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 0
+; CHECK: %[[Vec3DoubleR1:.*]] = OpFMul %[[Double]] %[[Param2Double]] %[[Vec3DoubleItem0]]
+; CHECK: %[[Vec3DoubleItem1:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 1
+; CHECK: %[[Vec3DoubleR2:.*]] = OpFMul %[[Double]] %[[Vec3DoubleR1]] %[[Vec3DoubleItem1]]
+; CHECK: %[[Vec3DoubleItem2:.*]] = OpCompositeExtract %[[Double]] %[[ParamVec3Double]] 2
+; CHECK: %[[Vec3DoubleR3:.*]] = OpFMul %[[Double]] %[[Vec3DoubleR2]] %[[Vec3DoubleItem2]]
+; CHECK: OpReturnValue %[[Vec3DoubleR3]]
+; CHECK: OpFunctionEnd
+
+define spir_func half @test_vector_reduce_fmul_v2half(half %sp, <2 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmul.v2half(half %sp, <2 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmul_v3half(half %sp, <3 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmul.v3half(half %sp, <3 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmul_v4half(half %sp, <4 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmul.v4half(half %sp, <4 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmul_v8half(half %sp, <8 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmul.v8half(half %sp, <8 x half> %v)
+ ret half %0
+}
+
+define spir_func half @test_vector_reduce_fmul_v16half(half %sp, <16 x half> %v) {
+entry:
+ %0 = call half @llvm.vector.reduce.fmul.v16half(half %sp, <16 x half> %v)
+ ret half %0
+}
+
+define spir_func float @test_vector_reduce_fmul_v2float(float %sp, <2 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmul.v2float(float %sp, <2 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmul_v3float(float %sp, <3 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmul.v3float(float %sp, <3 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmul_v4float(float %sp, <4 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmul.v4float(float %sp, <4 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmul_v8float(float %sp, <8 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmul.v8float(float %sp, <8 x float> %v)
+ ret float %0
+}
+
+define spir_func float @test_vector_reduce_fmul_v16float(float %sp, <16 x float> %v) {
+entry:
+ %0 = call float @llvm.vector.reduce.fmul.v16float(float %sp, <16 x float> %v)
+ ret float %0
+}
+
+define spir_func double @test_vector_reduce_fmul_v2double(double %sp, <2 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmul.v2double(double %sp, <2 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmul_v3double(double %sp, <3 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmul.v3double(double %sp, <3 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmul_v4double(double %sp, <4 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmul.v4double(double %sp, <4 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmul_v8double(double %sp, <8 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmul.v8double(double %sp, <8 x double> %v)
+ ret double %0
+}
+
+define spir_func double @test_vector_reduce_fmul_v16double(double %sp, <16 x double> %v) {
+entry:
+ %0 = call double @llvm.vector.reduce.fmul.v16double(double %sp, <16 x double> %v)
+ ret double %0
+}
+
+declare half @llvm.vector.reduce.fmul.v2half(half, <2 x half>)
+declare half @llvm.vector.reduce.fmul.v3half(half, <3 x half>)
+declare half @llvm.vector.reduce.fmul.v4half(half, <4 x half>)
+declare half @llvm.vector.reduce.fmul.v8half(half, <8 x half>)
+declare half @llvm.vector.reduce.fmul.v16half(half, <16 x half>)
+declare float @llvm.vector.reduce.fmul.v2float(float, <2 x float>)
+declare float @llvm.vector.reduce.fmul.v3float(float, <3 x float>)
+declare float @llvm.vector.reduce.fmul.v4float(float, <4 x float>)
+declare float @llvm.vector.reduce.fmul.v8float(float, <8 x float>)
+declare float @llvm.vector.reduce.fmul.v16float(float, <16 x float>)
+declare double @llvm.vector.reduce.fmul.v2double(double, <2 x double>)
+declare double @llvm.vector.reduce.fmul.v3double(double, <3 x double>)
+declare double @llvm.vector.reduce.fmul.v4double(double, <4 x double>)
+declare double @llvm.vector.reduce.fmul.v8double(double, <8 x double>)
+declare double @llvm.vector.reduce.fmul.v16double(double, <16 x double>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
new file mode 100644
index 00000000000000..5f6e58b5e2721e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
@@ -0,0 +1,235 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
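+; Expected lowering (checked below): 2-element reductions use an OpVectorShuffle,
+; a vector OpIMul and an extract of element 0; 3-element ones use a scalar chain.
+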
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Mul1:.*]] = OpIMul %[[CharVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Mul1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpIMul %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpIMul %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Mul1:.*]] = OpIMul %[[ShortVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Mul1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpIMul %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpIMul %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Mul1:.*]] = OpIMul %[[IntVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Mul1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpIMul %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpIMul %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Mul1:.*]] = OpIMul %[[LongVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Mul1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpIMul %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpIMul %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_mul_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_mul_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.mul.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_mul_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_mul_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_mul_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_mul_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.mul.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i32 @test_vector_reduce_mul_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_mul_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.mul.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_mul_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_mul_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_mul_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_mul_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_mul_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.mul.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_mul_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_mul_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_mul_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.mul.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.mul.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.mul.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
new file mode 100644
index 00000000000000..f44d3fbe83f77c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
@@ -0,0 +1,235 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
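+; Expected lowering (checked below): 2-element reductions use an OpVectorShuffle,
+; a vector OpBitwiseOr and an extract of element 0; 3-element ones use a scalar chain.
+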
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[CharVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Or1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseOr %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseOr %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[ShortVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Or1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseOr %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseOr %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[IntVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Or1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseOr %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseOr %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[LongVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Or1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseOr %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseOr %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_or_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_or_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.or.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_or_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_or_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_or_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_or_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.or.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_or_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_or_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_or_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i32 @test_vector_reduce_or_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_or_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_or_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_or_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_or_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_or_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_or_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.or.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_or_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_or_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_or_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.or.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.or.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.or.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
new file mode 100644
index 00000000000000..0322f7bc5f5633
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
@@ -0,0 +1,235 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
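+; Expected lowering (checked below): 2-element reductions use an OpVectorShuffle and
+; a vector s_max OpExtInst; 3-element ones use a chain of scalar s_max OpExtInst calls.
+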
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[CharVec2]] %[[#]] s_max %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpExtInst %[[Char]] %[[#]] s_max %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpExtInst %[[Char]] %[[#]] s_max %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[ShortVec2]] %[[#]] s_max %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpExtInst %[[Short]] %[[#]] s_max %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpExtInst %[[Short]] %[[#]] s_max %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[IntVec2]] %[[#]] s_max %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpExtInst %[[Int]] %[[#]] s_max %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpExtInst %[[Int]] %[[#]] s_max %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[LongVec2]] %[[#]] s_max %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpExtInst %[[Long]] %[[#]] s_max %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpExtInst %[[Long]] %[[#]] s_max %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_smax_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smax_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smax.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smax_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smax_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smax_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_smax_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smax_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smax.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smax_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smax_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smax_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_smax_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smax_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smax.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smax_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smax_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smax_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_smax_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smax_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smax.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smax_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smax_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
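A note on the expected lowering, for readers following the CHECK lines above: the v2 blocks pin down the pairwise shape of the expansion. Sticking to what the test actually checks, the generated code for a two-lane signed-max reduction looks like this sketch (SPIR-V assembly; %v, %ext and the result names are illustrative, with %ext standing for the imported OpenCL.std extended instruction set):

    %shuf = OpVectorShuffle %v2char %v %v 1 -1     ; move lane 1 into lane 0, second lane undef
    %max  = OpExtInst %v2char %ext s_max %v %shuf  ; pairwise signed max
    %res  = OpCompositeExtract %char %max 0        ; the reduced scalar

The v3 blocks instead extract each lane with OpCompositeExtract and fold them with a chain of scalar s_max calls; the wider (v4/v8/v16) functions in this file are executed by the RUN lines but their bodies are not pattern-checked here.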
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
new file mode 100644
index 00000000000000..edc6637afdfb9c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[CharVec2]] %[[#]] s_min %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpExtInst %[[Char]] %[[#]] s_min %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpExtInst %[[Char]] %[[#]] s_min %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[ShortVec2]] %[[#]] s_min %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpExtInst %[[Short]] %[[#]] s_min %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpExtInst %[[Short]] %[[#]] s_min %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[IntVec2]] %[[#]] s_min %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpExtInst %[[Int]] %[[#]] s_min %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpExtInst %[[Int]] %[[#]] s_min %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[LongVec2]] %[[#]] s_min %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpExtInst %[[Long]] %[[#]] s_min %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpExtInst %[[Long]] %[[#]] s_min %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_smin_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smin_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smin.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smin_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smin_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_smin_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_smin_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smin_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smin.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smin_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smin_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_smin_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_smin_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smin_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smin.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smin_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smin_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_smin_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_smin_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smin_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smin.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smin_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smin_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smin.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
new file mode 100644
index 00000000000000..a78655e2639580
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[CharVec2]] %[[#]] u_max %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpExtInst %[[Char]] %[[#]] u_max %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpExtInst %[[Char]] %[[#]] u_max %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[ShortVec2]] %[[#]] u_max %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpExtInst %[[Short]] %[[#]] u_max %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpExtInst %[[Short]] %[[#]] u_max %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[IntVec2]] %[[#]] u_max %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpExtInst %[[Int]] %[[#]] u_max %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpExtInst %[[Int]] %[[#]] u_max %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Max1:.*]] = OpExtInst %[[LongVec2]] %[[#]] u_max %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Max1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpExtInst %[[Long]] %[[#]] u_max %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpExtInst %[[Long]] %[[#]] u_max %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_umax_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umax_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umax.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umax_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umax_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umax_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_umax_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umax_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umax.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umax_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umax_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umax_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_umax_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umax_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umax.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umax_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umax_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umax_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_umax_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umax_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umax.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umax_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umax_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
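Worth noting across these four min/max files: the scaffolding (OpVectorShuffle for two lanes, OpCompositeExtract chains for three) is identical, and only the OpenCL.std instruction selected by the OpExtInst changes with the intrinsic's signedness:

    ; llvm.vector.reduce.smax.* -> OpExtInst ... s_max
    ; llvm.vector.reduce.smin.* -> OpExtInst ... s_min
    ; llvm.vector.reduce.umax.* -> OpExtInst ... u_max
    ; llvm.vector.reduce.umin.* -> OpExtInst ... u_min  (checked in umin.ll below)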
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
new file mode 100644
index 00000000000000..9a5b6c0b5237ce
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[CharVec2]] %[[#]] u_min %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpExtInst %[[Char]] %[[#]] u_min %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpExtInst %[[Char]] %[[#]] u_min %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[ShortVec2]] %[[#]] u_min %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpExtInst %[[Short]] %[[#]] u_min %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpExtInst %[[Short]] %[[#]] u_min %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[IntVec2]] %[[#]] u_min %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpExtInst %[[Int]] %[[#]] u_min %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpExtInst %[[Int]] %[[#]] u_min %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Min1:.*]] = OpExtInst %[[LongVec2]] %[[#]] u_min %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Min1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpExtInst %[[Long]] %[[#]] u_min %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpExtInst %[[Long]] %[[#]] u_min %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_umin_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umin_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umin.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umin_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umin_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_umin_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_umin_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umin_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umin.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umin_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umin_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_umin_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_umin_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umin_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umin.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umin_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umin_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_umin_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_umin_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umin_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umin.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umin_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umin_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umin.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
new file mode 100644
index 00000000000000..6dc87042060708
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
@@ -0,0 +1,233 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+target triple = "spir64-unknown-unknown"
+
+; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
+; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
+
+; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
+; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
+
+; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
+; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
+
+; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
+; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Xor1:.*]] = OpBitwiseXor %[[CharVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Xor1]] 0
+; CHECK: OpReturnValue %[[Vec2CharR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
+; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
+; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
+; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
+; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseXor %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
+; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseXor %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
+; CHECK: OpReturnValue %[[Vec3CharR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Xor1:.*]] = OpBitwiseXor %[[ShortVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Xor1]] 0
+; CHECK: OpReturnValue %[[Vec2ShortR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
+; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
+; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
+; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
+; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseXor %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
+; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseXor %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
+; CHECK: OpReturnValue %[[Vec3ShortR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Xor1:.*]] = OpBitwiseXor %[[IntVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Xor1]] 0
+; CHECK: OpReturnValue %[[Vec2IntR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
+; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
+; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
+; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
+; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseXor %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
+; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseXor %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
+; CHECK: OpReturnValue %[[Vec3IntR2]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
+; CHECK: %[[Xor1:.*]] = OpBitwiseXor %[[LongVec2]] %[[#]] %[[#]]
+; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Xor1]] 0
+; CHECK: OpReturnValue %[[Vec2LongR]]
+; CHECK: OpFunctionEnd
+
+; CHECK: OpFunction
+; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
+; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
+; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
+; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
+; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseXor %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
+; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseXor %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
+; CHECK: OpReturnValue %[[Vec3LongR2]]
+; CHECK: OpFunctionEnd
+
+define spir_func i8 @test_vector_reduce_xor_v2i8(<2 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_xor_v3i8(<3 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.xor.v3i8(<3 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_xor_v4i8(<4 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_xor_v8i8(<8 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i8 @test_vector_reduce_xor_v16i8(<16 x i8> %v) {
+entry:
+ %0 = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %v)
+ ret i8 %0
+}
+
+define spir_func i16 @test_vector_reduce_xor_v2i16(<2 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_xor_v3i16(<3 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.xor.v3i16(<3 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_xor_v4i16(<4 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_xor_v8i16(<8 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %v)
+ ret i16 %0
+}
+
+define spir_func i16 @test_vector_reduce_xor_v16i16(<16 x i16> %v) {
+entry:
+ %0 = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v)
+ ret i16 %0
+}
+
+
+define spir_func i32 @test_vector_reduce_xor_v2i32(<2 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_xor_v3i32(<3 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.xor.v3i32(<3 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_xor_v4i32(<4 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_xor_v8i32(<8 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i32 @test_vector_reduce_xor_v16i32(<16 x i32> %v) {
+entry:
+ %0 = call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v)
+ ret i32 %0
+}
+
+define spir_func i64 @test_vector_reduce_xor_v2i64(<2 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_xor_v3i64(<3 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.xor.v3i64(<3 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_xor_v4i64(<4 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_xor_v8i64(<8 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> %v)
+ ret i64 %0
+}
+
+define spir_func i64 @test_vector_reduce_xor_v16i64(<16 x i64> %v) {
+entry:
+ %0 = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> %v)
+ ret i64 %0
+}
+
+declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.xor.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+
+declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.xor.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.xor.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>)
+
+declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.xor.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>)
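Unlike the min/max reductions, the bitwise reductions need no extended instruction set: the xor test above checks the core OpBitwiseXor instruction directly (the or test earlier in this patch presumably checks OpBitwiseOr in the same way). The checked v3 shape is a plain extract-and-fold chain; sketched for <3 x i8> with illustrative names:

    %e0 = OpCompositeExtract %char %v 0
    %e1 = OpCompositeExtract %char %v 1
    %e2 = OpCompositeExtract %char %v 2
    %r1 = OpBitwiseXor %char %e0 %e1
    %r2 = OpBitwiseXor %char %r1 %e2   ; returned as the reduction result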