[llvm] [AMDGPU] Allow SLP to analyze i8s (PR #113002)

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 18 16:24:02 PDT 2024


https://github.com/jrbyrnes created https://github.com/llvm/llvm-project/pull/113002

This is a refinement of https://github.com/llvm/llvm-project/pull/105850 and https://github.com/llvm/llvm-project/pull/95328

The fundamental benefit of vectorizing i8s is to facilitate vector i8 type coercion across basic blocks. This has the real benefit of reducing register usage and extract / repack code. That said, vectorizing i8s is not uniformly beneficial, so it is important to have a precise vectorization cost model. https://github.com/llvm/llvm-project/pull/112999 teaches SLP about the vectorization costs that arise when legalizing illegal types, and about the benefit of type coercion. This PR enables SLP to consider i8s for vectorization. The effect of both is to vectorize i8 chains when the type coercion benefit outweighs any potential costs.
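
For intuition, here is a small standalone C++ sketch (illustrative only, not part of the patch; the one-unit-per-scalar cost and the four-i8s-per-part packing are assumptions modeled on the AMDGPU changes below) of the net data-flow cost of keeping a v4i8 packed across a block boundary versus propagating four loose i8 scalars:

  #include <cassert>
  #include <cstdio>

  // Hypothetical stand-in for a GCN-like getNumberOfParts on i8 vectors:
  // four 8-bit elements pack into one 32-bit part.
  static unsigned numberOfI8Parts(unsigned NumElts) { return NumElts / 4; }

  int main() {
    const unsigned VF = 4; // vector factor of the bundle
    // Assume one cost unit per scalar and one per legalized vector part.
    unsigned ScalarDFlow = 1 * VF;              // four loose i8s
    unsigned VectorDFlow = numberOfI8Parts(VF); // one packed v4i8
    int Net = (int)VectorDFlow - (int)ScalarDFlow;
    std::printf("net data-flow adjustment: %d\n", Net); // prints -3
    assert(Net < 0 && "packing i8s across blocks should be profitable");
    return 0;
  }

A negative adjustment is what allows the cost model to accept an i8 chain it would otherwise reject.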

From 37b44411e1f651dcc6b6732f7501ddd0b0c86a35 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Wed, 16 Oct 2024 10:54:39 -0700
Subject: [PATCH 1/2] [SLP] NFC: Introduce and use getDataFlowCost

Change-Id: I6a9155f4af3f8ccc943ab9d46c07dab07dc9b5c5
---
 .../llvm/Analysis/TargetTransformInfo.h       | 13 +++
 .../llvm/Analysis/TargetTransformInfoImpl.h   |  4 +
 llvm/include/llvm/CodeGen/BasicTTIImpl.h      |  4 +
 llvm/lib/Analysis/TargetTransformInfo.cpp     |  7 ++
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      |  8 ++
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.h |  2 +
 .../Transforms/Vectorize/SLPVectorizer.cpp    | 91 +++++++++++++++++--
 7 files changed, 123 insertions(+), 6 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index f55f21c94a85a4..934012b2e53f5c 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1534,6 +1534,14 @@ class TargetTransformInfo {
       Function *F, Type *RetTy, ArrayRef<Type *> Tys,
       TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
 
+  /// \returns The cost of propagating Type \p DataType across basic block /
+  /// function boundaries. If \p IsCallingConv is true, then \p DataType is
+  /// associated with either a function argument or return value. Otherwise,
+  /// \p DataType is used in either a GEP instruction, or spans basic blocks
+  /// (this is relevant because the SelectionDAG builder may, for example,
+  /// scalarize illegal vectors across blocks, introducing extract/insert code).
+  InstructionCost getDataFlowCost(Type *DataType, bool IsCallingConv) const;
+
   /// \returns The number of pieces into which the provided type must be
   /// split during legalization. Zero is returned when the answer is unknown.
   unsigned getNumberOfParts(Type *Tp) const;
@@ -2096,6 +2104,8 @@ class TargetTransformInfo::Concept {
   virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                            ArrayRef<Type *> Tys,
                                            TTI::TargetCostKind CostKind) = 0;
+  virtual InstructionCost getDataFlowCost(Type *DataType,
+                                          bool IsCallingConv) = 0;
   virtual unsigned getNumberOfParts(Type *Tp) = 0;
   virtual InstructionCost
   getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) = 0;
@@ -2781,6 +2791,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
                                    TTI::TargetCostKind CostKind) override {
     return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
   }
+  InstructionCost getDataFlowCost(Type *DataType, bool IsCallingConv) override {
+    return Impl.getDataFlowCost(DataType, IsCallingConv);
+  }
   unsigned getNumberOfParts(Type *Tp) override {
     return Impl.getNumberOfParts(Tp);
   }
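
As a caller-side illustration of the new hook (a hypothetical fragment, assuming an LLVMContext Ctx and a TargetTransformInfo TTI are in scope; only getDataFlowCost itself comes from this patch):

  // Cost of a v4i8 value crossing a basic-block boundary...
  InstructionCost BlockCost = TTI.getDataFlowCost(
      FixedVectorType::get(Type::getInt8Ty(Ctx), 4), /*IsCallingConv=*/false);
  // ...versus one tied to a function argument or return value.
  InstructionCost ABICost = TTI.getDataFlowCost(
      FixedVectorType::get(Type::getInt8Ty(Ctx), 4), /*IsCallingConv=*/true);
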
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 7828bdc1f1f43c..5a25a88c3eb460 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -772,6 +772,10 @@ class TargetTransformInfoImplBase {
     return 1;
   }
 
+  InstructionCost getDataFlowCost(Type *DataType, bool IsCallingConv) const {
+    return 0;
+  }
+
   // Assume that we have a register of the right size for the type.
   unsigned getNumberOfParts(Type *Tp) const { return 1; }
 
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 9f8d3ded9b3c1a..c6a5c38a1b3fd5 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -2410,6 +2410,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return 10;
   }
 
+  InstructionCost getDataFlowCost(Type *DataType, bool IsCallingConv) {
+    return 0;
+  }
+
   unsigned getNumberOfParts(Type *Tp) {
     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
     return LT.first.isValid() ? *LT.first.getValue() : 0;
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 7e721cbc87f3f0..edef9afa747d62 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1116,6 +1116,13 @@ TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
   return Cost;
 }
 
+InstructionCost TargetTransformInfo::getDataFlowCost(Type *DataType,
+                                                     bool IsCallingConv) const {
+  InstructionCost Cost = TTIImpl->getDataFlowCost(DataType, IsCallingConv);
+  assert(Cost >= 0 && "TTI should not produce negative costs!");
+  return Cost;
+}
+
 unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
   return TTIImpl->getNumberOfParts(Tp);
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 437e01c37c6b60..5d58cc62dbde09 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -306,6 +306,14 @@ bool GCNTTIImpl::hasBranchDivergence(const Function *F) const {
   return !F || !ST->isSingleLaneExecution(*F);
 }
 
+InstructionCost GCNTTIImpl::getDataFlowCost(Type *DataType,
+                                            bool IsCallingConv) {
+  if (isTypeLegal(DataType) || IsCallingConv)
+    return BaseT::getDataFlowCost(DataType, IsCallingConv);
+
+  return getNumberOfParts(DataType);
+}
+
 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
   // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
   // registers. See getRegisterClassForType for the implementation.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index b423df17302ca2..c195c860075eb0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -161,6 +161,8 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr);
 
+  InstructionCost getDataFlowCost(Type *DataType, bool IsCallingConv);
+
   bool isInlineAsmSourceOfDivergence(const CallInst *CI,
                                      ArrayRef<unsigned> Indices = {}) const;
 
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index ae0819c964bef3..42617eb4cf2095 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -9044,6 +9044,51 @@ static SmallVector<Type *> buildIntrinsicArgTypes(const CallInst *CI,
   return ArgTys;
 }
 
+// The cost model may determine that vectorizing and eliminating a series of
+// ExtractElements is beneficial. However, if the input vector is a function
+// argument, the calling convention may require extractions in the generated
+// code. In this scenario, vectorization would not eliminate the
+// ExtractElement sequence, but would add additional vectorization code.
+// getCCCostFromScalars does the proper accounting for this.
+static unsigned getCCCostFromScalars(ArrayRef<Value *> Scalars,
+                                     unsigned ScalarSize,
+                                     TargetTransformInfo *TTI) {
+  SetVector<Value *> ArgRoots;
+  for (unsigned I = 0; I < ScalarSize; I++) {
+    auto *Scalar = Scalars[I];
+    if (!Scalar)
+      continue;
+    auto *EE = dyn_cast<ExtractElementInst>(Scalar);
+    if (!EE)
+      continue;
+
+    auto *Vec = EE->getOperand(0);
+    if (!Vec->getType()->isVectorTy())
+      continue;
+
+    auto *F = EE->getFunction();
+    auto FoundIt = find_if(
+        F->args(), [&Vec](Argument &I) { return Vec == cast<Value>(&I); });
+
+    if (FoundIt == F->arg_end())
+      continue;
+
+    // SetVector::insert is a no-op for duplicates.
+    ArgRoots.insert(Vec);
+  }
+
+  if (ArgRoots.empty())
+    return 0;
+
+  unsigned Cost = 0;
+  for (auto *ArgOp : ArgRoots) {
+    Cost += TTI->getDataFlowCost(ArgOp->getType(), /*IsCallingConv*/ true)
+                .getValue()
+                .value_or(0);
+  }
+  return Cost;
+}
+
 InstructionCost
 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
                       SmallPtrSetImpl<Value *> &CheckedExtracts) {
@@ -9075,15 +9120,16 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
   auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF);
 
   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
+  InstructionCost CommonCost = getCCCostFromScalars(VL, VL.size(), TTI);
   if (E->State == TreeEntry::NeedToGather) {
     if (allConstant(VL))
-      return 0;
+      return CommonCost;
     if (isa<InsertElementInst>(VL[0]))
       return InstructionCost::getInvalid();
-    return processBuildVector<ShuffleCostEstimator, InstructionCost>(
-        E, ScalarTy, *TTI, VectorizedVals, *this, CheckedExtracts);
+    return CommonCost +
+           processBuildVector<ShuffleCostEstimator, InstructionCost>(
+               E, ScalarTy, *TTI, VectorizedVals, *this, CheckedExtracts);
   }
-  InstructionCost CommonCost = 0;
   SmallVector<int> Mask;
   bool IsReverseOrder = isReverseOrder(E->ReorderIndices);
   if (!E->ReorderIndices.empty() &&
@@ -10241,6 +10287,31 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
 
     InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts);
     Cost += C;
+
+    // Calculate the cost difference of propagating a vector vs. a series of
+    // scalars across blocks. This may be nonzero for illegal vectors.
+    Instruction *VL0 = TE.getMainOp();
+    bool IsAPhi = VL0 && isa<PHINode>(VL0);
+    bool HasNextEntry = VL0 && ((I + 1) < VectorizableTree.size());
+    bool LiveThru = false;
+    if (HasNextEntry) {
+      Instruction *VL1 = VectorizableTree[I + 1]->getMainOp();
+      LiveThru = VL1 && (VL0->getParent() != VL1->getParent());
+    }
+    if (IsAPhi || LiveThru) {
+      VectorType *VTy = dyn_cast<VectorType>(VL0->getType());
+      Type *ScalarTy = VTy ? VTy->getElementType() : VL0->getType();
+      if (ScalarTy && isValidElementType(ScalarTy)) {
+        InstructionCost ScalarDFlow =
+            TTI->getDataFlowCost(ScalarTy,
+                                 /*IsCallingConv*/ false) *
+            TE.getVectorFactor();
+        InstructionCost VectorDFlow = TTI->getDataFlowCost(
+            FixedVectorType::get(ScalarTy, TE.getVectorFactor()), /*IsCallingConv*/ false);
+        Cost += (VectorDFlow - ScalarDFlow);
+      }
+    }
+
     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle "
                       << shortBundleName(TE.Scalars) << ".\n"
                       << "SLP: Current total cost = " << Cost << "\n");
@@ -10257,8 +10328,9 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
   for (ExternalUser &EU : ExternalUses) {
     // We only add extract cost once for the same scalar.
     if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
-        !ExtractCostCalculated.insert(EU.Scalar).second)
+        !ExtractCostCalculated.insert(EU.Scalar).second) {
       continue;
+    }
 
     // Uses by ephemeral values are free (because the ephemeral value will be
     // removed prior to code generation, and so the extraction will be
@@ -10266,6 +10338,14 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
     if (EphValues.count(EU.User))
       continue;
 
+    // Account for any additional costs required by the calling convention
+    // for the type.
+    if (isa_and_nonnull<ReturnInst>(EU.User)) {
+      Cost +=
+          TTI->getDataFlowCost(EU.Scalar->getType(), /*IsCallingConv*/ true);
+      continue;
+    }
+
     // No extract cost for vector "scalar"
     if (isa<FixedVectorType>(EU.Scalar->getType()))
       continue;
@@ -10566,7 +10646,6 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
   if (ViewSLPTree)
     ViewGraph(this, "SLP" + F->getName(), false, Str);
 #endif
-
   return Cost;
 }
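
An aside on the calling-convention accounting introduced above: extracts rooted at a function-argument vector survive vectorization because the ABI still materializes them, so eliminating them is not a real saving. A minimal model of that accounting (a sketch over a hypothetical flattened representation of the scalars, not the actual LLVM data structures):

  #include <set>
  #include <string>
  #include <vector>

  // Each scalar candidate records whether it is an extractelement from a
  // function-argument vector and, if so, which one (hypothetical encoding).
  struct ScalarInfo {
    bool IsExtractFromArgVector = false;
    std::string ArgRoot; // name of the argument vector, if any
  };

  // One data-flow charge per distinct argument-vector root feeding the
  // bundle, mirroring getCCCostFromScalars in the patch above.
  static unsigned ccCostFromScalars(const std::vector<ScalarInfo> &Scalars,
                                    unsigned CostPerRoot) {
    std::set<std::string> Roots;
    for (const ScalarInfo &S : Scalars)
      if (S.IsExtractFromArgVector)
        Roots.insert(S.ArgRoot);
    return static_cast<unsigned>(Roots.size()) * CostPerRoot;
  }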
 

From 204d5184752f5217dfaa799a33a15b893dab752a Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 3 May 2024 11:37:40 -0700
Subject: [PATCH 2/2] [AMDGPU] Allow SLP to analyze i8s

Change-Id: Ia995bc646e5f050083bd6277eeabe0b5ab410f47
---
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      |  25 +-
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.h |   1 +
 .../AMDGPU/add_sub_sat-inseltpoison.ll        |  55 +++
 .../SLPVectorizer/AMDGPU/add_sub_sat.ll       |  56 +++
 .../Transforms/SLPVectorizer/AMDGPU/i8.ll     | 358 ++++++++++++++++++
 .../AMDGPU/phi-result-use-order.ll            |  75 ++++
 .../SLPVectorizer/AMDGPU/reduction.ll         | 327 ++++++++++++++++
 7 files changed, 894 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/AMDGPU/i8.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 5d58cc62dbde09..882bb5bd0e2dc1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -314,6 +314,23 @@ InstructionCost GCNTTIImpl::getDataFlowCost(Type *DataType,
   return getNumberOfParts(DataType);
 }
 
+unsigned GCNTTIImpl::getNumberOfParts(Type *Tp) {
+  // For certain 8 bit ops, we can pack a v4i8 into a single part
+  // (e.g. v4i8 shufflevectors -> v_perm v4i8, v4i8). Thus, we
+  // do not limit the number of parts for 8 bit vectors to their
+  // legalization cost. It is left up to other target queries
+  // (e.g. get*InstrCost) to decide the proper handling of
+  // 8 bit vectors.
+  if (FixedVectorType *VTy = dyn_cast<FixedVectorType>(Tp)) {
+    if (DL.getTypeSizeInBits(VTy->getElementType()) == 8) {
+      unsigned ElCount = VTy->getElementCount().getFixedValue();
+      return ElCount / 4;
+    }
+  }
+
+  return BaseT::getNumberOfParts(Tp);
+}
+
 unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
   // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
   // registers. See getRegisterClassForType for the implementation.
@@ -345,9 +362,11 @@ unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
 unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
   if (Opcode == Instruction::Load || Opcode == Instruction::Store)
     return 32 * 4 / ElemWidth;
-  return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
-       : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
-       : 1;
+
+  return (ElemWidth == 8)                              ? 4
+         : (ElemWidth == 16)                           ? 2
+         : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
+                                                       : 1;
 }
 
 unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index c195c860075eb0..efff73efd4b059 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -117,6 +117,7 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
     return TTI::PSK_FastHardware;
   }
 
+  unsigned getNumberOfParts(Type *Tp);
   unsigned getNumberOfRegisters(unsigned RCID) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
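
To summarize the VF limits the .cpp hunk above establishes for non-load/store opcodes (a sketch; it assumes the subtarget reports packed FP32 support and omits the opcode parameter the real query takes):

  // Maximum vector factor by element width for non-memory operations.
  static unsigned gcnMaximumVF(unsigned ElemWidth) {
    switch (ElemWidth) {
    case 8:  return 4; // four i8 lanes fit in one 32-bit register
    case 16: return 2; // packed 16-bit operations
    case 32: return 2; // packed FP32 operations (subtarget permitting)
    default: return 1; // otherwise keep it scalar
    }
  }
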
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
index 3749bdf1bba394..373c18d0c64317 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
@@ -363,11 +363,66 @@ bb:
   ret <4 x i16> %ins.3
 }
 
+define <4 x i8> @uadd_sat_v4i8(<4 x i8> %arg0, <4 x i8> %arg1) {
+; GCN-LABEL: @uadd_sat_v4i8(
+; GCN-NEXT:  bb:
+; GCN-NEXT:    [[TMP0:%.*]] = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> [[ARG0:%.*]], <4 x i8> [[ARG1:%.*]])
+; GCN-NEXT:    ret <4 x i8> [[TMP0]]
+;
+bb:
+  %arg0.0 = extractelement <4 x i8> %arg0, i64 0
+  %arg0.1 = extractelement <4 x i8> %arg0, i64 1
+  %arg0.2 = extractelement <4 x i8> %arg0, i64 2
+  %arg0.3 = extractelement <4 x i8> %arg0, i64 3
+  %arg1.0 = extractelement <4 x i8> %arg1, i64 0
+  %arg1.1 = extractelement <4 x i8> %arg1, i64 1
+  %arg1.2 = extractelement <4 x i8> %arg1, i64 2
+  %arg1.3 = extractelement <4 x i8> %arg1, i64 3
+  %add.0 = call i8 @llvm.uadd.sat.i8(i8 %arg0.0, i8 %arg1.0)
+  %add.1 = call i8 @llvm.uadd.sat.i8(i8 %arg0.1, i8 %arg1.1)
+  %add.2 = call i8 @llvm.uadd.sat.i8(i8 %arg0.2, i8 %arg1.2)
+  %add.3 = call i8 @llvm.uadd.sat.i8(i8 %arg0.3, i8 %arg1.3)
+  %ins.0 = insertelement <4 x i8> poison, i8 %add.0, i64 0
+  %ins.1 = insertelement <4 x i8> %ins.0, i8 %add.1, i64 1
+  %ins.2 = insertelement <4 x i8> %ins.1, i8 %add.2, i64 2
+  %ins.3 = insertelement <4 x i8> %ins.2, i8 %add.3, i64 3
+  ret <4 x i8> %ins.3
+}
+
+define <4 x i8> @usub_sat_v4i8(<4 x i8> %arg0, <4 x i8> %arg1) {
+; GCN-LABEL: @usub_sat_v4i8(
+; GCN-NEXT:  bb:
+; GCN-NEXT:    [[TMP0:%.*]] = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> [[ARG0:%.*]], <4 x i8> [[ARG1:%.*]])
+; GCN-NEXT:    ret <4 x i8> [[TMP0]]
+;
+bb:
+  %arg0.0 = extractelement <4 x i8> %arg0, i64 0
+  %arg0.1 = extractelement <4 x i8> %arg0, i64 1
+  %arg0.2 = extractelement <4 x i8> %arg0, i64 2
+  %arg0.3 = extractelement <4 x i8> %arg0, i64 3
+  %arg1.0 = extractelement <4 x i8> %arg1, i64 0
+  %arg1.1 = extractelement <4 x i8> %arg1, i64 1
+  %arg1.2 = extractelement <4 x i8> %arg1, i64 2
+  %arg1.3 = extractelement <4 x i8> %arg1, i64 3
+  %add.0 = call i8 @llvm.usub.sat.i8(i8 %arg0.0, i8 %arg1.0)
+  %add.1 = call i8 @llvm.usub.sat.i8(i8 %arg0.1, i8 %arg1.1)
+  %add.2 = call i8 @llvm.usub.sat.i8(i8 %arg0.2, i8 %arg1.2)
+  %add.3 = call i8 @llvm.usub.sat.i8(i8 %arg0.3, i8 %arg1.3)
+  %ins.0 = insertelement <4 x i8> poison, i8 %add.0, i64 0
+  %ins.1 = insertelement <4 x i8> %ins.0, i8 %add.1, i64 1
+  %ins.2 = insertelement <4 x i8> %ins.1, i8 %add.2, i64 2
+  %ins.3 = insertelement <4 x i8> %ins.2, i8 %add.3, i64 3
+  ret <4 x i8> %ins.3
+}
+
 declare i16 @llvm.uadd.sat.i16(i16, i16) #0
 declare i16 @llvm.usub.sat.i16(i16, i16) #0
 declare i16 @llvm.sadd.sat.i16(i16, i16) #0
 declare i16 @llvm.ssub.sat.i16(i16, i16) #0
 
+declare i8 @llvm.uadd.sat.i8(i8, i8) #0
+declare i8 @llvm.usub.sat.i8(i8, i8) #0
+
 declare i32 @llvm.uadd.sat.i32(i32, i32) #0
 declare i32 @llvm.usub.sat.i32(i32, i32) #0
 declare i32 @llvm.sadd.sat.i32(i32, i32) #0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
index 0bb641371825b5..1d18162a7960e3 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
@@ -363,11 +363,67 @@ bb:
   ret <4 x i16> %ins.3
 }
 
+define <4 x i8> @uadd_sat_v4i8(<4 x i8> %arg0, <4 x i8> %arg1, ptr addrspace(1) %dst) {
+; GCN-LABEL: @uadd_sat_v4i8(
+; GCN-NEXT:  bb:
+; GCN-NEXT:    [[TMP0:%.*]] = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> [[ARG0:%.*]], <4 x i8> [[ARG1:%.*]])
+; GCN-NEXT:    ret <4 x i8> [[TMP0]]
+;
+bb:
+  %arg0.0 = extractelement <4 x i8> %arg0, i64 0
+  %arg0.1 = extractelement <4 x i8> %arg0, i64 1
+  %arg0.2 = extractelement <4 x i8> %arg0, i64 2
+  %arg0.3 = extractelement <4 x i8> %arg0, i64 3
+  %arg1.0 = extractelement <4 x i8> %arg1, i64 0
+  %arg1.1 = extractelement <4 x i8> %arg1, i64 1
+  %arg1.2 = extractelement <4 x i8> %arg1, i64 2
+  %arg1.3 = extractelement <4 x i8> %arg1, i64 3
+  %add.0 = call i8 @llvm.uadd.sat.i8(i8 %arg0.0, i8 %arg1.0)
+  %add.1 = call i8 @llvm.uadd.sat.i8(i8 %arg0.1, i8 %arg1.1)
+  %add.2 = call i8 @llvm.uadd.sat.i8(i8 %arg0.2, i8 %arg1.2)
+  %add.3 = call i8 @llvm.uadd.sat.i8(i8 %arg0.3, i8 %arg1.3)
+  %ins.0 = insertelement <4 x i8> undef, i8 %add.0, i64 0
+  %ins.1 = insertelement <4 x i8> %ins.0, i8 %add.1, i64 1
+  %ins.2 = insertelement <4 x i8> %ins.1, i8 %add.2, i64 2
+  %ins.3 = insertelement <4 x i8> %ins.2, i8 %add.3, i64 3
+  ret <4 x i8> %ins.3
+}
+define <4 x i8> @usub_sat_v4i8(<4 x i8> %arg0, <4 x i8> %arg1) {
+; GCN-LABEL: @usub_sat_v4i8(
+; GCN-NEXT:  bb:
+; GCN-NEXT:    [[TMP0:%.*]] = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> [[ARG0:%.*]], <4 x i8> [[ARG1:%.*]])
+; GCN-NEXT:    ret <4 x i8> [[TMP0]]
+;
+bb:
+  %arg0.0 = extractelement <4 x i8> %arg0, i64 0
+  %arg0.1 = extractelement <4 x i8> %arg0, i64 1
+  %arg0.2 = extractelement <4 x i8> %arg0, i64 2
+  %arg0.3 = extractelement <4 x i8> %arg0, i64 3
+  %arg1.0 = extractelement <4 x i8> %arg1, i64 0
+  %arg1.1 = extractelement <4 x i8> %arg1, i64 1
+  %arg1.2 = extractelement <4 x i8> %arg1, i64 2
+  %arg1.3 = extractelement <4 x i8> %arg1, i64 3
+  %add.0 = call i8 @llvm.usub.sat.i8(i8 %arg0.0, i8 %arg1.0)
+  %add.1 = call i8 @llvm.usub.sat.i8(i8 %arg0.1, i8 %arg1.1)
+  %add.2 = call i8 @llvm.usub.sat.i8(i8 %arg0.2, i8 %arg1.2)
+  %add.3 = call i8 @llvm.usub.sat.i8(i8 %arg0.3, i8 %arg1.3)
+  %ins.0 = insertelement <4 x i8> undef, i8 %add.0, i64 0
+  %ins.1 = insertelement <4 x i8> %ins.0, i8 %add.1, i64 1
+  %ins.2 = insertelement <4 x i8> %ins.1, i8 %add.2, i64 2
+  %ins.3 = insertelement <4 x i8> %ins.2, i8 %add.3, i64 3
+  ret <4 x i8> %ins.3
+
+}
+
+
 declare i16 @llvm.uadd.sat.i16(i16, i16) #0
 declare i16 @llvm.usub.sat.i16(i16, i16) #0
 declare i16 @llvm.sadd.sat.i16(i16, i16) #0
 declare i16 @llvm.ssub.sat.i16(i16, i16) #0
 
+declare i8 @llvm.uadd.sat.i8(i8, i8) #0
+declare i8 @llvm.usub.sat.i8(i8, i8) #0
+
 declare i32 @llvm.uadd.sat.i32(i32, i32) #0
 declare i32 @llvm.usub.sat.i32(i32, i32) #0
 declare i32 @llvm.sadd.sat.i32(i32, i32) #0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/i8.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/i8.ll
new file mode 100644
index 00000000000000..6bdcf628f6c879
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/i8.ll
@@ -0,0 +1,358 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=slp-vectorizer %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer %s | FileCheck -check-prefixes=GFX8PLUS,GFX8 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer %s | FileCheck -check-prefixes=GFX8PLUS,GFX9 %s
+
+define protected amdgpu_kernel void @phi(ptr addrspace(3) %inptr0, ptr addrspace(3) %inptr1, ptr %out, ptr %out1, i32 %flag) {
+; GCN-LABEL: @phi(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GCN-NEXT:    [[ELE0:%.*]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GCN-NEXT:    [[GEP1:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 1
+; GCN-NEXT:    [[ELE1:%.*]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GCN-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 2
+; GCN-NEXT:    [[ELE2:%.*]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GCN-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 3
+; GCN-NEXT:    [[ELE3:%.*]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GCN-NEXT:    br label [[DO_BODY:%.*]]
+; GCN:       do.body:
+; GCN-NEXT:    [[PHI0:%.*]] = phi i8 [ [[ELE3]], [[ENTRY:%.*]] ], [ [[OTHERELE3:%.*]], [[DO_BODY]] ]
+; GCN-NEXT:    [[PHI1:%.*]] = phi i8 [ [[ELE2]], [[ENTRY]] ], [ [[OTHERELE2:%.*]], [[DO_BODY]] ]
+; GCN-NEXT:    [[PHI2:%.*]] = phi i8 [ [[ELE1]], [[ENTRY]] ], [ [[OTHERELE1:%.*]], [[DO_BODY]] ]
+; GCN-NEXT:    [[PHI3:%.*]] = phi i8 [ [[ELE0]], [[ENTRY]] ], [ [[OTHERELE0:%.*]], [[DO_BODY]] ]
+; GCN-NEXT:    [[OTHERELE0]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GCN-NEXT:    [[OTHERELE1]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GCN-NEXT:    [[OTHERELE2]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GCN-NEXT:    [[OTHERELE3]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GCN-NEXT:    [[VEC00:%.*]] = insertelement <16 x i8> poison, i8 [[OTHERELE0]], i64 8
+; GCN-NEXT:    [[VEC01:%.*]] = insertelement <16 x i8> [[VEC00]], i8 [[OTHERELE1]], i64 9
+; GCN-NEXT:    [[VEC02:%.*]] = insertelement <16 x i8> [[VEC01]], i8 [[OTHERELE2]], i64 10
+; GCN-NEXT:    [[VEC03:%.*]] = insertelement <16 x i8> [[VEC02]], i8 [[OTHERELE3]], i64 11
+; GCN-NEXT:    [[VEC10:%.*]] = insertelement <16 x i8> poison, i8 [[PHI3]], i64 8
+; GCN-NEXT:    [[VEC11:%.*]] = insertelement <16 x i8> [[VEC10]], i8 [[PHI2]], i64 9
+; GCN-NEXT:    [[VEC12:%.*]] = insertelement <16 x i8> [[VEC11]], i8 [[PHI1]], i64 10
+; GCN-NEXT:    [[VEC13:%.*]] = insertelement <16 x i8> [[VEC12]], i8 [[PHI0]], i64 11
+; GCN-NEXT:    store <16 x i8> [[VEC13]], ptr addrspace(3) [[INPTR1:%.*]], align 2
+; GCN-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GCN-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[DO_BODY]]
+; GCN:       exit:
+; GCN-NEXT:    store <16 x i8> [[VEC13]], ptr [[OUT:%.*]], align 16
+; GCN-NEXT:    store <16 x i8> [[VEC03]], ptr [[OUT1:%.*]], align 16
+; GCN-NEXT:    ret void
+;
+; GFX7-LABEL: @phi(
+; GFX7-NEXT:  entry:
+; GFX7-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GFX7-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX7-NEXT:    br label [[DO_BODY:%.*]]
+; GFX7:       do.body:
+; GFX7-NEXT:    [[TMP1:%.*]] = phi <4 x i8> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[DO_BODY]] ]
+; GFX7-NEXT:    [[TMP2]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX7-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[VEC131:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    store <16 x i8> [[VEC131]], ptr addrspace(3) [[INPTR1:%.*]], align 2
+; GFX7-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GFX7-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[DO_BODY]]
+; GFX7:       exit:
+; GFX7-NEXT:    store <16 x i8> [[VEC131]], ptr [[OUT:%.*]], align 16
+; GFX7-NEXT:    store <16 x i8> [[TMP3]], ptr [[OUT1:%.*]], align 16
+; GFX7-NEXT:    ret void
+;
+; GFX8PLUS-LABEL: @phi(
+; GFX8PLUS-NEXT:  entry:
+; GFX8PLUS-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GFX8PLUS-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX8PLUS-NEXT:    br label [[DO_BODY:%.*]]
+; GFX8PLUS:       do.body:
+; GFX8PLUS-NEXT:    [[TMP1:%.*]] = phi <4 x i8> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[DO_BODY]] ]
+; GFX8PLUS-NEXT:    [[TMP2]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX8PLUS-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[VEC131:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    store <16 x i8> [[VEC131]], ptr addrspace(3) [[INPTR1:%.*]], align 2
+; GFX8PLUS-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GFX8PLUS-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[DO_BODY]]
+; GFX8PLUS:       exit:
+; GFX8PLUS-NEXT:    store <16 x i8> [[VEC131]], ptr [[OUT:%.*]], align 16
+; GFX8PLUS-NEXT:    store <16 x i8> [[TMP3]], ptr [[OUT1:%.*]], align 16
+; GFX8PLUS-NEXT:    ret void
+;
+entry:
+  %gep0 = getelementptr i8, ptr addrspace(3) %inptr0, i32 0
+  %ele0 = load i8, ptr addrspace(3) %gep0, align 8
+  %gep1 = getelementptr i8, ptr addrspace(3) %inptr0, i32 1
+  %ele1 = load i8, ptr addrspace(3) %gep1, align 1
+  %gep2 = getelementptr i8, ptr addrspace(3) %inptr0, i32 2
+  %ele2 = load i8, ptr addrspace(3) %gep2, align 2
+  %gep3 = getelementptr i8, ptr addrspace(3) %inptr0, i32 3
+  %ele3 = load i8, ptr addrspace(3) %gep3, align 1
+  br label %do.body
+
+do.body:
+  %phi0 = phi i8 [ %ele3, %entry ], [ %otherele3, %do.body ]
+  %phi1 = phi i8 [ %ele2, %entry ], [ %otherele2, %do.body ]
+  %phi2 = phi i8 [ %ele1, %entry ], [ %otherele1, %do.body ]
+  %phi3 = phi i8 [ %ele0, %entry ], [ %otherele0, %do.body ]
+  %otherele0 = load i8, ptr addrspace(3) %gep0, align 8
+  %otherele1 = load i8, ptr addrspace(3) %gep1, align 1
+  %otherele2 = load i8, ptr addrspace(3) %gep2, align 2
+  %otherele3 = load i8, ptr addrspace(3) %gep3, align 1
+  %vec00 = insertelement <16 x i8> poison, i8 %otherele0, i64 8
+  %vec01 = insertelement <16 x i8> %vec00, i8 %otherele1, i64 9
+  %vec02 = insertelement <16 x i8> %vec01, i8 %otherele2, i64 10
+  %vec03 = insertelement <16 x i8> %vec02, i8 %otherele3, i64 11
+  %vec10 = insertelement <16 x i8> poison, i8 %phi3, i64 8
+  %vec11 = insertelement <16 x i8> %vec10, i8 %phi2, i64 9
+  %vec12 = insertelement <16 x i8> %vec11, i8 %phi1, i64 10
+  %vec13 = insertelement <16 x i8> %vec12, i8 %phi0, i64 11
+  store <16 x i8> %vec13, ptr addrspace(3) %inptr1, align 2
+  %cmp = icmp eq i32 %flag, 0
+  br i1 %cmp, label %exit, label %do.body
+
+exit:
+  store <16 x i8> %vec13, ptr %out
+  store <16 x i8> %vec03, ptr %out1
+  ret void
+}
+
+
+define protected amdgpu_kernel void @arith_phi(ptr addrspace(3) %inptr0, ptr %out, i32 %flag) {
+; GCN-LABEL: @arith_phi(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GCN-NEXT:    [[ELE0:%.*]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GCN-NEXT:    [[GEP1:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 1
+; GCN-NEXT:    [[ELE1:%.*]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GCN-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 2
+; GCN-NEXT:    [[ELE2:%.*]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GCN-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 3
+; GCN-NEXT:    [[ELE3:%.*]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GCN-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GCN-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[BB_1:%.*]]
+; GCN:       bb.1:
+; GCN-NEXT:    [[ADD0:%.*]] = add i8 [[ELE0]], 1
+; GCN-NEXT:    [[ADD1:%.*]] = add i8 [[ELE1]], 1
+; GCN-NEXT:    [[ADD2:%.*]] = add i8 [[ELE2]], 1
+; GCN-NEXT:    [[ADD3:%.*]] = add i8 [[ELE3]], 1
+; GCN-NEXT:    br label [[EXIT]]
+; GCN:       exit:
+; GCN-NEXT:    [[PHI0:%.*]] = phi i8 [ [[ELE3]], [[ENTRY:%.*]] ], [ [[ADD0]], [[BB_1]] ]
+; GCN-NEXT:    [[PHI1:%.*]] = phi i8 [ [[ELE2]], [[ENTRY]] ], [ [[ADD1]], [[BB_1]] ]
+; GCN-NEXT:    [[PHI2:%.*]] = phi i8 [ [[ELE1]], [[ENTRY]] ], [ [[ADD2]], [[BB_1]] ]
+; GCN-NEXT:    [[PHI3:%.*]] = phi i8 [ [[ELE0]], [[ENTRY]] ], [ [[ADD3]], [[BB_1]] ]
+; GCN-NEXT:    [[OTHERELE0:%.*]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GCN-NEXT:    [[OTHERELE1:%.*]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GCN-NEXT:    [[OTHERELE2:%.*]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GCN-NEXT:    [[OTHERELE3:%.*]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GCN-NEXT:    [[VEC10:%.*]] = insertelement <16 x i8> poison, i8 [[PHI3]], i64 8
+; GCN-NEXT:    [[VEC11:%.*]] = insertelement <16 x i8> [[VEC10]], i8 [[PHI2]], i64 9
+; GCN-NEXT:    [[VEC12:%.*]] = insertelement <16 x i8> [[VEC11]], i8 [[PHI1]], i64 10
+; GCN-NEXT:    [[VEC13:%.*]] = insertelement <16 x i8> [[VEC12]], i8 [[PHI0]], i64 11
+; GCN-NEXT:    store <16 x i8> [[VEC13]], ptr [[OUT:%.*]], align 2
+; GCN-NEXT:    ret void
+;
+; GFX7-LABEL: @arith_phi(
+; GFX7-NEXT:  entry:
+; GFX7-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GFX7-NEXT:    [[GEP1:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 1
+; GFX7-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 2
+; GFX7-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 3
+; GFX7-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX7-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GFX7-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[BB_1:%.*]]
+; GFX7:       bb.1:
+; GFX7-NEXT:    [[TMP1:%.*]] = add <4 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; GFX7-NEXT:    br label [[EXIT]]
+; GFX7:       exit:
+; GFX7-NEXT:    [[TMP3:%.*]] = phi <4 x i8> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB_1]] ]
+; GFX7-NEXT:    [[OTHERELE0:%.*]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GFX7-NEXT:    [[OTHERELE1:%.*]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GFX7-NEXT:    [[OTHERELE2:%.*]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GFX7-NEXT:    [[OTHERELE3:%.*]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GFX7-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[VEC131:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    store <16 x i8> [[VEC131]], ptr [[OUT:%.*]], align 2
+; GFX7-NEXT:    ret void
+;
+; GFX8PLUS-LABEL: @arith_phi(
+; GFX8PLUS-NEXT:  entry:
+; GFX8PLUS-NEXT:    [[GEP0:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0:%.*]], i32 0
+; GFX8PLUS-NEXT:    [[GEP1:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 1
+; GFX8PLUS-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 2
+; GFX8PLUS-NEXT:    [[GEP3:%.*]] = getelementptr i8, ptr addrspace(3) [[INPTR0]], i32 3
+; GFX8PLUS-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr addrspace(3) [[GEP0]], align 8
+; GFX8PLUS-NEXT:    [[CMP:%.*]] = icmp eq i32 [[FLAG:%.*]], 0
+; GFX8PLUS-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[BB_1:%.*]]
+; GFX8PLUS:       bb.1:
+; GFX8PLUS-NEXT:    [[TMP1:%.*]] = add <4 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; GFX8PLUS-NEXT:    br label [[EXIT]]
+; GFX8PLUS:       exit:
+; GFX8PLUS-NEXT:    [[TMP3:%.*]] = phi <4 x i8> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB_1]] ]
+; GFX8PLUS-NEXT:    [[OTHERELE0:%.*]] = load i8, ptr addrspace(3) [[GEP0]], align 8
+; GFX8PLUS-NEXT:    [[OTHERELE1:%.*]] = load i8, ptr addrspace(3) [[GEP1]], align 1
+; GFX8PLUS-NEXT:    [[OTHERELE2:%.*]] = load i8, ptr addrspace(3) [[GEP2]], align 2
+; GFX8PLUS-NEXT:    [[OTHERELE3:%.*]] = load i8, ptr addrspace(3) [[GEP3]], align 1
+; GFX8PLUS-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[VEC131:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> poison, <16 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    store <16 x i8> [[VEC131]], ptr [[OUT:%.*]], align 2
+; GFX8PLUS-NEXT:    ret void
+;
+entry:
+  %gep0 = getelementptr i8, ptr addrspace(3) %inptr0, i32 0
+  %ele0 = load i8, ptr addrspace(3) %gep0, align 8
+  %gep1 = getelementptr i8, ptr addrspace(3) %inptr0, i32 1
+  %ele1 = load i8, ptr addrspace(3) %gep1, align 1
+  %gep2 = getelementptr i8, ptr addrspace(3) %inptr0, i32 2
+  %ele2 = load i8, ptr addrspace(3) %gep2, align 2
+  %gep3 = getelementptr i8, ptr addrspace(3) %inptr0, i32 3
+  %ele3 = load i8, ptr addrspace(3) %gep3, align 1
+  %cmp = icmp eq i32 %flag, 0
+  br i1 %cmp, label %exit, label %bb.1
+
+bb.1:
+  %add0 = add i8 %ele0, 1
+  %add1 = add i8 %ele1, 1
+  %add2 = add i8 %ele2, 1
+  %add3 = add i8 %ele3, 1
+  br label %exit
+
+exit:
+  %phi0 = phi i8 [ %ele3, %entry ], [ %add0, %bb.1 ]
+  %phi1 = phi i8 [ %ele2, %entry ], [ %add1, %bb.1 ]
+  %phi2 = phi i8 [ %ele1, %entry ], [ %add2, %bb.1 ]
+  %phi3 = phi i8 [ %ele0, %entry ], [ %add3, %bb.1 ]
+  %otherele0 = load i8, ptr addrspace(3) %gep0, align 8
+  %otherele1 = load i8, ptr addrspace(3) %gep1, align 1
+  %otherele2 = load i8, ptr addrspace(3) %gep2, align 2
+  %otherele3 = load i8, ptr addrspace(3) %gep3, align 1
+  %vec10 = insertelement <16 x i8> poison, i8 %phi3, i64 8
+  %vec11 = insertelement <16 x i8> %vec10, i8 %phi2, i64 9
+  %vec12 = insertelement <16 x i8> %vec11, i8 %phi1, i64 10
+  %vec13 = insertelement <16 x i8> %vec12, i8 %phi0, i64 11
+  store <16 x i8> %vec13, ptr %out, align 2
+  ret void
+}
+
+define protected amdgpu_kernel void @arith(<16 x i8> %invec, ptr %out, i32 %flag) {
+; GFX7-LABEL: @arith(
+; GFX7-NEXT:  entry:
+; GFX7-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[INVEC:%.*]], <16 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX7-NEXT:    [[TMP1:%.*]] = mul <4 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP2:%.*]] = add <4 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP3:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; GFX7-NEXT:    [[TMP4:%.*]] = mul <4 x i8> [[TMP3]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP5:%.*]] = add <4 x i8> [[TMP4]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; GFX7-NEXT:    [[TMP7:%.*]] = mul <4 x i8> [[TMP6]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP8:%.*]] = add <4 x i8> [[TMP7]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; GFX7-NEXT:    [[TMP10:%.*]] = mul <4 x i8> [[TMP9]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP11:%.*]] = add <4 x i8> [[TMP10]], <i8 1, i8 1, i8 1, i8 1>
+; GFX7-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[VECINS71:%.*]] = shufflevector <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; GFX7-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP8]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[VECINS112:%.*]] = shufflevector <16 x i8> [[VECINS71]], <16 x i8> [[TMP14]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 12, i32 13, i32 14, i32 15>
+; GFX7-NEXT:    [[TMP15:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX7-NEXT:    [[VECINS153:%.*]] = shufflevector <16 x i8> [[VECINS112]], <16 x i8> [[TMP15]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; GFX7-NEXT:    store <16 x i8> [[VECINS153]], ptr [[OUT:%.*]], align 16
+; GFX7-NEXT:    ret void
+;
+; GFX8PLUS-LABEL: @arith(
+; GFX8PLUS-NEXT:  entry:
+; GFX8PLUS-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[INVEC:%.*]], <16 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX8PLUS-NEXT:    [[TMP1:%.*]] = mul <4 x i8> [[TMP0]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP2:%.*]] = add <4 x i8> [[TMP1]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP3:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; GFX8PLUS-NEXT:    [[TMP4:%.*]] = mul <4 x i8> [[TMP3]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP5:%.*]] = add <4 x i8> [[TMP4]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
+; GFX8PLUS-NEXT:    [[TMP7:%.*]] = mul <4 x i8> [[TMP6]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP8:%.*]] = add <4 x i8> [[TMP7]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[INVEC]], <16 x i8> poison, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
+; GFX8PLUS-NEXT:    [[TMP10:%.*]] = mul <4 x i8> [[TMP9]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP11:%.*]] = add <4 x i8> [[TMP10]], <i8 1, i8 1, i8 1, i8 1>
+; GFX8PLUS-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[VECINS71:%.*]] = shufflevector <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; GFX8PLUS-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP8]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[VECINS112:%.*]] = shufflevector <16 x i8> [[VECINS71]], <16 x i8> [[TMP14]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 12, i32 13, i32 14, i32 15>
+; GFX8PLUS-NEXT:    [[TMP15:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; GFX8PLUS-NEXT:    [[VECINS153:%.*]] = shufflevector <16 x i8> [[VECINS112]], <16 x i8> [[TMP15]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; GFX8PLUS-NEXT:    store <16 x i8> [[VECINS153]], ptr [[OUT:%.*]], align 16
+; GFX8PLUS-NEXT:    ret void
+;
+entry:
+  %el0 = extractelement <16 x i8> %invec, i64  0
+  %el1 = extractelement <16 x i8> %invec, i64  1
+  %el2 = extractelement <16 x i8> %invec, i64  2
+  %el3 = extractelement <16 x i8> %invec, i64  3
+  %el4 = extractelement <16 x i8> %invec, i64  4
+  %el5 = extractelement <16 x i8> %invec, i64  5
+  %el6 = extractelement <16 x i8> %invec, i64  6
+  %el7 = extractelement <16 x i8> %invec, i64  7
+  %el8 = extractelement <16 x i8> %invec, i64  8
+  %el9 = extractelement <16 x i8> %invec, i64  9
+  %el10 = extractelement <16 x i8> %invec, i64 10
+  %el11 = extractelement <16 x i8> %invec, i64 11
+  %el12 = extractelement <16 x i8> %invec, i64 12
+  %el13 = extractelement <16 x i8> %invec, i64 13
+  %el14 = extractelement <16 x i8> %invec, i64 14
+  %el15 = extractelement <16 x i8> %invec, i64 15
+  %mul0 = mul i8 %el0, 1
+  %mul1 = mul i8 %el1, 1
+  %mul2 = mul i8 %el2, 1
+  %mul3 = mul i8 %el3, 1
+  %mul4 = mul i8 %el4, 1
+  %mul5 = mul i8 %el5, 1
+  %mul6 = mul i8 %el6, 1
+  %mul7 = mul i8 %el7, 1
+  %mul8 = mul i8 %el8, 1
+  %mul9 = mul i8 %el9, 1
+  %mul10 = mul i8 %el10, 1
+  %mul11 = mul i8 %el11, 1
+  %mul12 = mul i8 %el12, 1
+  %mul13 = mul i8 %el13, 1
+  %mul14 = mul i8 %el14, 1
+  %mul15 = mul i8 %el15, 1
+  %add0 = add i8 %mul0, 1
+  %add1 = add i8 %mul1, 1
+  %add2 = add i8 %mul2, 1
+  %add3 = add i8 %mul3, 1
+  %add4 = add i8 %mul4, 1
+  %add5 = add i8 %mul5, 1
+  %add6 = add i8 %mul6, 1
+  %add7 = add i8 %mul7, 1
+  %add8 = add i8 %mul8, 1
+  %add9 = add i8 %mul9, 1
+  %add10 = add i8 %mul10, 1
+  %add11 = add i8 %mul11, 1
+  %add12 = add i8 %mul12, 1
+  %add13 = add i8 %mul13, 1
+  %add14 = add i8 %mul14, 1
+  %add15 = add i8 %mul15, 1
+  %vecins0 = insertelement <16 x i8> poison, i8 %add0, i64 0
+  %vecins1 = insertelement <16 x i8> %vecins0, i8 %add1, i64 1
+  %vecins2 = insertelement <16 x i8> %vecins1, i8 %add2, i64 2
+  %vecins3 = insertelement <16 x i8> %vecins2, i8 %add3, i64 3
+  %vecins4 = insertelement <16 x i8> %vecins3, i8 %add4, i64 4
+  %vecins5 = insertelement <16 x i8> %vecins4, i8 %add5, i64 5
+  %vecins6 = insertelement <16 x i8> %vecins5, i8 %add6, i64 6
+  %vecins7 = insertelement <16 x i8> %vecins6, i8 %add7, i64 7
+  %vecins8 = insertelement <16 x i8> %vecins7, i8 %add8, i64 8
+  %vecins9 = insertelement <16 x i8> %vecins8, i8 %add9, i64 9
+  %vecins10 = insertelement <16 x i8> %vecins9, i8 %add10, i64 10
+  %vecins11 = insertelement <16 x i8> %vecins10, i8 %add11, i64 11
+  %vecins12 = insertelement <16 x i8> %vecins11, i8 %add12, i64 12
+  %vecins13 = insertelement <16 x i8> %vecins12, i8 %add13, i64 13
+  %vecins14 = insertelement <16 x i8> %vecins13, i8 %add14, i64 14
+  %vecins15 = insertelement <16 x i8> %vecins14, i8 %add15, i64 15
+  store <16 x i8> %vecins15, ptr %out
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX8: {{.*}}
+; GFX9: {{.*}}
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
index 3b63c1e35610fe..08c32691cf366c 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
@@ -90,3 +90,78 @@ bb1:
   %o3 = insertelement <4 x half> %o2, half %c3, i64 3
   ret <4 x half> %o3
 }
+
+
+define <4 x i8> @phisi8(i1 %cmp1, <4 x i8> %in1, <4 x i8> %in2)  {
+; CHECK-LABEL: @phisi8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
+; CHECK:       bb0:
+; CHECK-NEXT:    br label [[BB1]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <4 x i8> [ [[IN1:%.*]], [[ENTRY:%.*]] ], [ [[IN2:%.*]], [[BB0]] ]
+; CHECK-NEXT:    ret <4 x i8> [[TMP0]]
+;
+entry:
+  %a0 = extractelement <4 x i8> %in1, i64 0
+  %a1 = extractelement <4 x i8> %in1, i64 1
+  %a2 = extractelement <4 x i8> %in1, i64 2
+  %a3 = extractelement <4 x i8> %in1, i64 3
+  br i1 %cmp1, label %bb1, label %bb0
+
+bb0:
+  %b0 = extractelement <4 x i8> %in2, i64 0
+  %b1 = extractelement <4 x i8> %in2, i64 1
+  %b2 = extractelement <4 x i8> %in2, i64 2
+  %b3 = extractelement <4 x i8> %in2, i64 3
+  br label %bb1
+
+bb1:
+  %c0 = phi i8 [ %a0, %entry ], [ %b0, %bb0 ]
+  %c1 = phi i8 [ %a1, %entry ], [ %b1, %bb0 ]
+  %c2 = phi i8 [ %a2, %entry ], [ %b2, %bb0 ]
+  %c3 = phi i8 [ %a3, %entry ], [ %b3, %bb0 ]
+
+  %o0 = insertelement <4 x i8> undef, i8 %c0, i64 0
+  %o1 = insertelement <4 x i8> %o0, i8 %c1, i64 1
+  %o2 = insertelement <4 x i8> %o1, i8 %c2, i64 2
+  %o3 = insertelement <4 x i8> %o2, i8 %c3, i64 3
+  ret <4 x i8> %o3
+}
+
+define <4 x i8> @phisi8_reverse(i1 %cmp1, <4 x i8> %in1, <4 x i8> %in2)  {
+; CHECK-LABEL: @phisi8_reverse(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
+; CHECK:       bb0:
+; CHECK-NEXT:    br label [[BB1]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <4 x i8> [ [[IN1:%.*]], [[ENTRY:%.*]] ], [ [[IN2:%.*]], [[BB0]] ]
+; CHECK-NEXT:    ret <4 x i8> [[TMP0]]
+;
+entry:
+  %a0 = extractelement <4 x i8> %in1, i64 0
+  %a1 = extractelement <4 x i8> %in1, i64 1
+  %a2 = extractelement <4 x i8> %in1, i64 2
+  %a3 = extractelement <4 x i8> %in1, i64 3
+  br i1 %cmp1, label %bb1, label %bb0
+
+bb0:
+  %b0 = extractelement <4 x i8> %in2, i64 0
+  %b1 = extractelement <4 x i8> %in2, i64 1
+  %b2 = extractelement <4 x i8> %in2, i64 2
+  %b3 = extractelement <4 x i8> %in2, i64 3
+  br label %bb1
+
+bb1:
+  %c3 = phi i8 [ %a3, %entry ], [ %b3, %bb0 ]
+  %c2 = phi i8 [ %a2, %entry ], [ %b2, %bb0 ]
+  %c1 = phi i8 [ %a1, %entry ], [ %b1, %bb0 ]
+  %c0 = phi i8 [ %a0, %entry ], [ %b0, %bb0 ]
+
+  %o0 = insertelement <4 x i8> undef, i8 %c0, i64 0
+  %o1 = insertelement <4 x i8> %o0, i8 %c1, i64 1
+  %o2 = insertelement <4 x i8> %o1, i8 %c2, i64 2
+  %o3 = insertelement <4 x i8> %o2, i8 %c3, i64 3
+  ret <4 x i8> %o3
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
index aceee8840bb40e..567760f6955984 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
@@ -549,3 +549,330 @@ entry:
 
   ret float %add3
 }
+
+define i8 @reduction_v4i8(<4 x i8> %a) {
+; GCN-LABEL: @reduction_v4i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <4 x i8> [[A:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <4 x i8> [[A]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <4 x i8> [[A]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <4 x i8> [[A]], i64 3
+; GCN-NEXT:    [[ADD1:%.*]] = add i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[ADD2:%.*]] = add i8 [[ELT2]], [[ADD1]]
+; GCN-NEXT:    [[ADD3:%.*]] = add i8 [[ELT3]], [[ADD2]]
+; GCN-NEXT:    ret i8 [[ADD3]]
+;
+entry:
+  %elt0 = extractelement <4 x i8> %a, i64 0
+  %elt1 = extractelement <4 x i8> %a, i64 1
+  %elt2 = extractelement <4 x i8> %a, i64 2
+  %elt3 = extractelement <4 x i8> %a, i64 3
+
+  %add1 = add i8 %elt1, %elt0
+  %add2 = add i8 %elt2, %add1
+  %add3 = add i8 %elt3, %add2
+
+  ret i8 %add3
+}
+
+define i8 @reduction_v8i8(<8 x i8> %vec8) {
+; GCN-LABEL: @reduction_v8i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <8 x i8> [[VEC8:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <8 x i8> [[VEC8]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <8 x i8> [[VEC8]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <8 x i8> [[VEC8]], i64 3
+; GCN-NEXT:    [[ELT4:%.*]] = extractelement <8 x i8> [[VEC8]], i64 4
+; GCN-NEXT:    [[ELT5:%.*]] = extractelement <8 x i8> [[VEC8]], i64 5
+; GCN-NEXT:    [[ELT6:%.*]] = extractelement <8 x i8> [[VEC8]], i64 6
+; GCN-NEXT:    [[ELT7:%.*]] = extractelement <8 x i8> [[VEC8]], i64 7
+; GCN-NEXT:    [[ADD1:%.*]] = add i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[ADD2:%.*]] = add i8 [[ELT2]], [[ADD1]]
+; GCN-NEXT:    [[ADD3:%.*]] = add i8 [[ELT3]], [[ADD2]]
+; GCN-NEXT:    [[ADD4:%.*]] = add i8 [[ELT4]], [[ADD3]]
+; GCN-NEXT:    [[ADD5:%.*]] = add i8 [[ELT5]], [[ADD4]]
+; GCN-NEXT:    [[ADD6:%.*]] = add i8 [[ELT6]], [[ADD5]]
+; GCN-NEXT:    [[ADD7:%.*]] = add i8 [[ELT7]], [[ADD6]]
+; GCN-NEXT:    ret i8 [[ADD7]]
+;
+entry:
+  %elt0 = extractelement <8 x i8> %vec8, i64 0
+  %elt1 = extractelement <8 x i8> %vec8, i64 1
+  %elt2 = extractelement <8 x i8> %vec8, i64 2
+  %elt3 = extractelement <8 x i8> %vec8, i64 3
+  %elt4 = extractelement <8 x i8> %vec8, i64 4
+  %elt5 = extractelement <8 x i8> %vec8, i64 5
+  %elt6 = extractelement <8 x i8> %vec8, i64 6
+  %elt7 = extractelement <8 x i8> %vec8, i64 7
+
+  %add1 = add i8 %elt1, %elt0
+  %add2 = add i8 %elt2, %add1
+  %add3 = add i8 %elt3, %add2
+  %add4 = add i8 %elt4, %add3
+  %add5 = add i8 %elt5, %add4
+  %add6 = add i8 %elt6, %add5
+  %add7 = add i8 %elt7, %add6
+
+  ret i8 %add7
+}
+
+define i8 @reduction_umin_v4i8(<4 x i8> %vec4) {
+; GCN-LABEL: @reduction_umin_v4i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <4 x i8> [[VEC4:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <4 x i8> [[VEC4]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <4 x i8> [[VEC4]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <4 x i8> [[VEC4]], i64 3
+; GCN-NEXT:    [[CMP1:%.*]] = icmp ult i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[MIN1:%.*]] = select i1 [[CMP1]], i8 [[ELT1]], i8 [[ELT0]]
+; GCN-NEXT:    [[CMP2:%.*]] = icmp ult i8 [[ELT2]], [[MIN1]]
+; GCN-NEXT:    [[MIN2:%.*]] = select i1 [[CMP2]], i8 [[ELT2]], i8 [[MIN1]]
+; GCN-NEXT:    [[CMP3:%.*]] = icmp ult i8 [[ELT3]], [[MIN2]]
+; GCN-NEXT:    [[MIN3:%.*]] = select i1 [[CMP3]], i8 [[ELT3]], i8 [[MIN2]]
+; GCN-NEXT:    ret i8 [[MIN3]]
+;
+entry:
+  %elt0 = extractelement <4 x i8> %vec4, i64 0
+  %elt1 = extractelement <4 x i8> %vec4, i64 1
+  %elt2 = extractelement <4 x i8> %vec4, i64 2
+  %elt3 = extractelement <4 x i8> %vec4, i64 3
+
+  %cmp1 = icmp ult i8 %elt1, %elt0
+  %min1 = select i1 %cmp1, i8 %elt1, i8 %elt0
+  %cmp2 = icmp ult i8 %elt2, %min1
+  %min2 = select i1 %cmp2, i8 %elt2, i8 %min1
+  %cmp3 = icmp ult i8 %elt3, %min2
+  %min3 = select i1 %cmp3, i8 %elt3, i8 %min2
+
+  ret i8 %min3
+}
+
+define i8 @reduction_icmp_v8i8(<8 x i8> %vec8) {
+; GCN-LABEL: @reduction_icmp_v8i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <8 x i8> [[VEC8:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <8 x i8> [[VEC8]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <8 x i8> [[VEC8]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <8 x i8> [[VEC8]], i64 3
+; GCN-NEXT:    [[ELT4:%.*]] = extractelement <8 x i8> [[VEC8]], i64 4
+; GCN-NEXT:    [[ELT5:%.*]] = extractelement <8 x i8> [[VEC8]], i64 5
+; GCN-NEXT:    [[ELT6:%.*]] = extractelement <8 x i8> [[VEC8]], i64 6
+; GCN-NEXT:    [[ELT7:%.*]] = extractelement <8 x i8> [[VEC8]], i64 7
+; GCN-NEXT:    [[CMP0:%.*]] = icmp ult i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[MIN1:%.*]] = select i1 [[CMP0]], i8 [[ELT1]], i8 [[ELT0]]
+; GCN-NEXT:    [[CMP1:%.*]] = icmp ult i8 [[ELT2]], [[MIN1]]
+; GCN-NEXT:    [[MIN2:%.*]] = select i1 [[CMP1]], i8 [[ELT2]], i8 [[MIN1]]
+; GCN-NEXT:    [[CMP2:%.*]] = icmp ult i8 [[ELT3]], [[MIN2]]
+; GCN-NEXT:    [[MIN3:%.*]] = select i1 [[CMP2]], i8 [[ELT3]], i8 [[MIN2]]
+; GCN-NEXT:    [[CMP3:%.*]] = icmp ult i8 [[ELT4]], [[MIN3]]
+; GCN-NEXT:    [[MIN4:%.*]] = select i1 [[CMP3]], i8 [[ELT4]], i8 [[MIN3]]
+; GCN-NEXT:    [[CMP4:%.*]] = icmp ult i8 [[ELT5]], [[MIN4]]
+; GCN-NEXT:    [[MIN5:%.*]] = select i1 [[CMP4]], i8 [[ELT5]], i8 [[MIN4]]
+; GCN-NEXT:    [[CMP5:%.*]] = icmp ult i8 [[ELT6]], [[MIN5]]
+; GCN-NEXT:    [[MIN6:%.*]] = select i1 [[CMP5]], i8 [[ELT6]], i8 [[MIN5]]
+; GCN-NEXT:    [[CMP6:%.*]] = icmp ult i8 [[ELT7]], [[MIN6]]
+; GCN-NEXT:    [[MIN7:%.*]] = select i1 [[CMP6]], i8 [[ELT7]], i8 [[MIN6]]
+; GCN-NEXT:    ret i8 [[MIN7]]
+;
+entry:
+  %elt0 = extractelement <8 x i8> %vec8, i64 0
+  %elt1 = extractelement <8 x i8> %vec8, i64 1
+  %elt2 = extractelement <8 x i8> %vec8, i64 2
+  %elt3 = extractelement <8 x i8> %vec8, i64 3
+  %elt4 = extractelement <8 x i8> %vec8, i64 4
+  %elt5 = extractelement <8 x i8> %vec8, i64 5
+  %elt6 = extractelement <8 x i8> %vec8, i64 6
+  %elt7 = extractelement <8 x i8> %vec8, i64 7
+
+  %cmp0 = icmp ult i8 %elt1, %elt0
+  %min1 = select i1 %cmp0, i8 %elt1, i8 %elt0
+  %cmp1 = icmp ult i8 %elt2, %min1
+  %min2 = select i1 %cmp1, i8 %elt2, i8 %min1
+  %cmp2 = icmp ult i8 %elt3, %min2
+  %min3 = select i1 %cmp2, i8 %elt3, i8 %min2
+
+  %cmp3 = icmp ult i8 %elt4, %min3
+  %min4 = select i1 %cmp3, i8 %elt4, i8 %min3
+  %cmp4 = icmp ult i8 %elt5, %min4
+  %min5 = select i1 %cmp4, i8 %elt5, i8 %min4
+
+  %cmp5 = icmp ult i8 %elt6, %min5
+  %min6 = select i1 %cmp5, i8 %elt6, i8 %min5
+  %cmp6 = icmp ult i8 %elt7, %min6
+  %min7 = select i1 %cmp6, i8 %elt7, i8 %min6
+
+  ret i8 %min7
+}
+
+define i8 @reduction_smin_v16i8(<16 x i8> %vec16) {
+; GCN-LABEL: @reduction_smin_v16i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <16 x i8> [[VEC16:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <16 x i8> [[VEC16]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <16 x i8> [[VEC16]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <16 x i8> [[VEC16]], i64 3
+; GCN-NEXT:    [[ELT4:%.*]] = extractelement <16 x i8> [[VEC16]], i64 4
+; GCN-NEXT:    [[ELT5:%.*]] = extractelement <16 x i8> [[VEC16]], i64 5
+; GCN-NEXT:    [[ELT6:%.*]] = extractelement <16 x i8> [[VEC16]], i64 6
+; GCN-NEXT:    [[ELT7:%.*]] = extractelement <16 x i8> [[VEC16]], i64 7
+; GCN-NEXT:    [[ELT8:%.*]] = extractelement <16 x i8> [[VEC16]], i64 8
+; GCN-NEXT:    [[ELT9:%.*]] = extractelement <16 x i8> [[VEC16]], i64 9
+; GCN-NEXT:    [[ELT10:%.*]] = extractelement <16 x i8> [[VEC16]], i64 10
+; GCN-NEXT:    [[ELT11:%.*]] = extractelement <16 x i8> [[VEC16]], i64 11
+; GCN-NEXT:    [[ELT12:%.*]] = extractelement <16 x i8> [[VEC16]], i64 12
+; GCN-NEXT:    [[ELT13:%.*]] = extractelement <16 x i8> [[VEC16]], i64 13
+; GCN-NEXT:    [[ELT14:%.*]] = extractelement <16 x i8> [[VEC16]], i64 14
+; GCN-NEXT:    [[ELT15:%.*]] = extractelement <16 x i8> [[VEC16]], i64 15
+; GCN-NEXT:    [[CMP0:%.*]] = icmp slt i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[MIN1:%.*]] = select i1 [[CMP0]], i8 [[ELT1]], i8 [[ELT0]]
+; GCN-NEXT:    [[CMP1:%.*]] = icmp slt i8 [[ELT2]], [[MIN1]]
+; GCN-NEXT:    [[MIN2:%.*]] = select i1 [[CMP1]], i8 [[ELT2]], i8 [[MIN1]]
+; GCN-NEXT:    [[CMP2:%.*]] = icmp slt i8 [[ELT3]], [[MIN2]]
+; GCN-NEXT:    [[MIN3:%.*]] = select i1 [[CMP2]], i8 [[ELT3]], i8 [[MIN2]]
+; GCN-NEXT:    [[CMP3:%.*]] = icmp slt i8 [[ELT4]], [[MIN3]]
+; GCN-NEXT:    [[MIN4:%.*]] = select i1 [[CMP3]], i8 [[ELT4]], i8 [[MIN3]]
+; GCN-NEXT:    [[CMP4:%.*]] = icmp slt i8 [[ELT5]], [[MIN4]]
+; GCN-NEXT:    [[MIN5:%.*]] = select i1 [[CMP4]], i8 [[ELT5]], i8 [[MIN4]]
+; GCN-NEXT:    [[CMP5:%.*]] = icmp slt i8 [[ELT6]], [[MIN5]]
+; GCN-NEXT:    [[MIN6:%.*]] = select i1 [[CMP5]], i8 [[ELT6]], i8 [[MIN5]]
+; GCN-NEXT:    [[CMP6:%.*]] = icmp slt i8 [[ELT7]], [[MIN6]]
+; GCN-NEXT:    [[MIN7:%.*]] = select i1 [[CMP6]], i8 [[ELT7]], i8 [[MIN6]]
+; GCN-NEXT:    [[CMP7:%.*]] = icmp slt i8 [[ELT8]], [[MIN7]]
+; GCN-NEXT:    [[MIN8:%.*]] = select i1 [[CMP7]], i8 [[ELT8]], i8 [[MIN7]]
+; GCN-NEXT:    [[CMP8:%.*]] = icmp slt i8 [[ELT9]], [[MIN8]]
+; GCN-NEXT:    [[MIN9:%.*]] = select i1 [[CMP8]], i8 [[ELT9]], i8 [[MIN8]]
+; GCN-NEXT:    [[CMP9:%.*]] = icmp slt i8 [[ELT10]], [[MIN9]]
+; GCN-NEXT:    [[MIN10:%.*]] = select i1 [[CMP9]], i8 [[ELT10]], i8 [[MIN9]]
+; GCN-NEXT:    [[CMP10:%.*]] = icmp slt i8 [[ELT11]], [[MIN10]]
+; GCN-NEXT:    [[MIN11:%.*]] = select i1 [[CMP10]], i8 [[ELT11]], i8 [[MIN10]]
+; GCN-NEXT:    [[CMP11:%.*]] = icmp slt i8 [[ELT12]], [[MIN11]]
+; GCN-NEXT:    [[MIN12:%.*]] = select i1 [[CMP11]], i8 [[ELT12]], i8 [[MIN11]]
+; GCN-NEXT:    [[CMP12:%.*]] = icmp slt i8 [[ELT13]], [[MIN12]]
+; GCN-NEXT:    [[MIN13:%.*]] = select i1 [[CMP12]], i8 [[ELT13]], i8 [[MIN12]]
+; GCN-NEXT:    [[CMP13:%.*]] = icmp slt i8 [[ELT14]], [[MIN13]]
+; GCN-NEXT:    [[MIN14:%.*]] = select i1 [[CMP13]], i8 [[ELT14]], i8 [[MIN13]]
+; GCN-NEXT:    [[CMP14:%.*]] = icmp slt i8 [[ELT15]], [[MIN14]]
+; GCN-NEXT:    [[MIN15:%.*]] = select i1 [[CMP14]], i8 [[ELT15]], i8 [[MIN14]]
+; GCN-NEXT:    ret i8 [[MIN15]]
+;
+entry:
+  %elt0 = extractelement <16 x i8> %vec16, i64 0
+  %elt1 = extractelement <16 x i8> %vec16, i64 1
+  %elt2 = extractelement <16 x i8> %vec16, i64 2
+  %elt3 = extractelement <16 x i8> %vec16, i64 3
+  %elt4 = extractelement <16 x i8> %vec16, i64 4
+  %elt5 = extractelement <16 x i8> %vec16, i64 5
+  %elt6 = extractelement <16 x i8> %vec16, i64 6
+  %elt7 = extractelement <16 x i8> %vec16, i64 7
+
+  %elt8 = extractelement <16 x i8> %vec16, i64 8
+  %elt9 = extractelement <16 x i8> %vec16, i64 9
+  %elt10 = extractelement <16 x i8> %vec16, i64 10
+  %elt11 = extractelement <16 x i8> %vec16, i64 11
+  %elt12 = extractelement <16 x i8> %vec16, i64 12
+  %elt13 = extractelement <16 x i8> %vec16, i64 13
+  %elt14 = extractelement <16 x i8> %vec16, i64 14
+  %elt15 = extractelement <16 x i8> %vec16, i64 15
+
+  %cmp0 = icmp slt i8 %elt1, %elt0
+  %min1 = select i1 %cmp0, i8 %elt1, i8 %elt0
+  %cmp1 = icmp slt i8 %elt2, %min1
+  %min2 = select i1 %cmp1, i8 %elt2, i8 %min1
+  %cmp2 = icmp slt i8 %elt3, %min2
+  %min3 = select i1 %cmp2, i8 %elt3, i8 %min2
+
+  %cmp3 = icmp slt i8 %elt4, %min3
+  %min4 = select i1 %cmp3, i8 %elt4, i8 %min3
+  %cmp4 = icmp slt i8 %elt5, %min4
+  %min5 = select i1 %cmp4, i8 %elt5, i8 %min4
+
+  %cmp5 = icmp slt i8 %elt6, %min5
+  %min6 = select i1 %cmp5, i8 %elt6, i8 %min5
+  %cmp6 = icmp slt i8 %elt7, %min6
+  %min7 = select i1 %cmp6, i8 %elt7, i8 %min6
+
+  %cmp7 = icmp slt i8 %elt8, %min7
+  %min8 = select i1 %cmp7, i8 %elt8, i8 %min7
+  %cmp8 = icmp slt i8 %elt9, %min8
+  %min9 = select i1 %cmp8, i8 %elt9, i8 %min8
+
+  %cmp9 = icmp slt i8 %elt10, %min9
+  %min10 = select i1 %cmp9, i8 %elt10, i8 %min9
+  %cmp10 = icmp slt i8 %elt11, %min10
+  %min11 = select i1 %cmp10, i8 %elt11, i8 %min10
+
+  %cmp11 = icmp slt i8 %elt12, %min11
+  %min12 = select i1 %cmp11, i8 %elt12, i8 %min11
+  %cmp12 = icmp slt i8 %elt13, %min12
+  %min13 = select i1 %cmp12, i8 %elt13, i8 %min12
+
+  %cmp13 = icmp slt i8 %elt14, %min13
+  %min14 = select i1 %cmp13, i8 %elt14, i8 %min13
+  %cmp14 = icmp slt i8 %elt15, %min14
+  %min15 = select i1 %cmp14, i8 %elt15, i8 %min14
+
+  ret i8 %min15
+}
+
+define i8 @reduction_umax_v4i8(<4 x i8> %vec4) {
+; GCN-LABEL: @reduction_umax_v4i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <4 x i8> [[VEC4:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <4 x i8> [[VEC4]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <4 x i8> [[VEC4]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <4 x i8> [[VEC4]], i64 3
+; GCN-NEXT:    [[CMP1:%.*]] = icmp ugt i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[MAX1:%.*]] = select i1 [[CMP1]], i8 [[ELT1]], i8 [[ELT0]]
+; GCN-NEXT:    [[CMP2:%.*]] = icmp ugt i8 [[ELT2]], [[MAX1]]
+; GCN-NEXT:    [[MAX2:%.*]] = select i1 [[CMP2]], i8 [[ELT2]], i8 [[MAX1]]
+; GCN-NEXT:    [[CMP3:%.*]] = icmp ugt i8 [[ELT3]], [[MAX2]]
+; GCN-NEXT:    [[MAX3:%.*]] = select i1 [[CMP3]], i8 [[ELT3]], i8 [[MAX2]]
+; GCN-NEXT:    ret i8 [[MAX3]]
+;
+entry:
+  %elt0 = extractelement <4 x i8> %vec4, i64 0
+  %elt1 = extractelement <4 x i8> %vec4, i64 1
+  %elt2 = extractelement <4 x i8> %vec4, i64 2
+  %elt3 = extractelement <4 x i8> %vec4, i64 3
+
+  %cmp1 = icmp ugt i8 %elt1, %elt0
+  %max1 = select i1 %cmp1, i8 %elt1, i8 %elt0
+  %cmp2 = icmp ugt i8 %elt2, %max1
+  %max2 = select i1 %cmp2, i8 %elt2, i8 %max1
+  %cmp3 = icmp ugt i8 %elt3, %max2
+  %max3 = select i1 %cmp3, i8 %elt3, i8 %max2
+
+  ret i8 %max3
+}
+
+define i8 @reduction_smax_v4i8(<4 x i8> %vec4) {
+; GCN-LABEL: @reduction_smax_v4i8(
+; GCN-NEXT:  entry:
+; GCN-NEXT:    [[ELT0:%.*]] = extractelement <4 x i8> [[VEC4:%.*]], i64 0
+; GCN-NEXT:    [[ELT1:%.*]] = extractelement <4 x i8> [[VEC4]], i64 1
+; GCN-NEXT:    [[ELT2:%.*]] = extractelement <4 x i8> [[VEC4]], i64 2
+; GCN-NEXT:    [[ELT3:%.*]] = extractelement <4 x i8> [[VEC4]], i64 3
+; GCN-NEXT:    [[CMP1:%.*]] = icmp sgt i8 [[ELT1]], [[ELT0]]
+; GCN-NEXT:    [[MAX1:%.*]] = select i1 [[CMP1]], i8 [[ELT1]], i8 [[ELT0]]
+; GCN-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[ELT2]], [[MAX1]]
+; GCN-NEXT:    [[MAX2:%.*]] = select i1 [[CMP2]], i8 [[ELT2]], i8 [[MAX1]]
+; GCN-NEXT:    [[CMP3:%.*]] = icmp sgt i8 [[ELT3]], [[MAX2]]
+; GCN-NEXT:    [[MAX3:%.*]] = select i1 [[CMP3]], i8 [[ELT3]], i8 [[MAX2]]
+; GCN-NEXT:    ret i8 [[MAX3]]
+;
+entry:
+  %elt0 = extractelement <4 x i8> %vec4, i64 0
+  %elt1 = extractelement <4 x i8> %vec4, i64 1
+  %elt2 = extractelement <4 x i8> %vec4, i64 2
+  %elt3 = extractelement <4 x i8> %vec4, i64 3
+
+  %cmp1 = icmp sgt i8 %elt1, %elt0
+  %max1 = select i1 %cmp1, i8 %elt1, i8 %elt0
+  %cmp2 = icmp sgt i8 %elt2, %max1
+  %max2 = select i1 %cmp2, i8 %elt2, i8 %max1
+  %cmp3 = icmp sgt i8 %elt3, %max2
+  %max3 = select i1 %cmp3, i8 %elt3, i8 %max2
+
+  ret i8 %max3
+}


