[llvm] 5849fcb - Revert rG1b7089fe67b924bdd5ecef786a34bdba7a88778f "[SLP] Add ScalarizationOverheadBuilder helper to track vector extractions"

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 30 03:23:07 PDT 2022


Author: Simon Pilgrim
Date: 2022-09-30T11:22:48+01:00
New Revision: 5849fcb635508648df8fa38fc85d032f664423f5

URL: https://github.com/llvm/llvm-project/commit/5849fcb635508648df8fa38fc85d032f664423f5
DIFF: https://github.com/llvm/llvm-project/commit/5849fcb635508648df8fa38fc85d032f664423f5.diff

LOG: Revert rG1b7089fe67b924bdd5ecef786a34bdba7a88778f "[SLP] Add ScalarizationOverheadBuilder helper to track vector extractions"
Revert rGef89409a59f3b79ae143b33b7d8e6ee6285aa42f "Fix 'unused-lambda-capture' gcc warning. NFCI."
Revert rG926ccfef032d206dcbcdf74ca1e3a9ebf4d1be45 "[SLP] ScalarizationOverheadBuilder - demand all elements for scalarization if the extraction index is unknown / out of bounds"

Revert the ScalarizationOverheadBuilder sequence from D134605 - when accumulating extraction costs by Type (instead of by a specific Value), we do not distinguish whether the extractions come from the same source vector or from different ones, so the extraction cost is only counted once. This needs addressing before we can use getScalarizationOverhead properly.
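For context, a minimal standalone sketch (not the reverted LLVM code itself, just an illustration under simplified types) of why keying the demanded-element masks by vector type under-counts: two extracts of the same lane from two different vectors of the same type collapse into a single demanded bit, so only one extraction is ever costed.

#include <bitset>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

int main() {
  // Demanded-element mask keyed by vector *type* (stand-in for the
  // DenseMap<FixedVectorType *, APInt> used by the reverted helper).
  std::map<std::string, uint64_t> DemandedByType;

  auto AddExtract = [&](const std::string &Ty, unsigned Idx) {
    DemandedByType[Ty] |= 1ull << Idx; // merges extracts from distinct sources
  };

  // Two *different* source vectors %a and %b, both of type <4 x float>.
  AddExtract("<4 x float>", 0); // extractelement <4 x float> %a, i32 0
  AddExtract("<4 x float>", 0); // extractelement <4 x float> %b, i32 0 - lost

  unsigned NumExtractsCosted = 0;
  for (const auto &It : DemandedByType)
    NumExtractsCosted += std::bitset<64>(It.second).count();

  // Prints 1, although the scalar code performs 2 extractions, so the
  // cost model under-estimates the scalarization (extraction) overhead.
  std::printf("extractions costed: %u\n", NumExtractsCosted);
  return 0;
}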

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
    llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll
    llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
    llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll
    llvm/test/Transforms/SLPVectorizer/X86/c-ray.ll
    llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll
    llvm/test/Transforms/SLPVectorizer/X86/geps-non-pow-2.ll
    llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 167858edc40c8..faec5611979d5 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -5782,80 +5782,6 @@ bool BoUpSLP::areAllUsersVectorized(Instruction *I,
          });
 }
 
-namespace {
-/// Helper to keep track of the extracted elements to compute an accumulated
-/// scalarization extraction cost.
-class ScalarizationOverheadBuilder {
-  /// Keep track of demanded elements by source vector or type.
-  typedef DenseMap<Value *, APInt> ExtractByClass;
-  typedef DenseMap<FixedVectorType *, APInt> ExtractByType;
-
-  /// TODO: Add getExtractWithExtendCost support to getScalarizationOverhead.
-  struct ExtractWithExtendOps {
-    unsigned Opcode;
-    VectorType *VecTy;
-    Type *SclTy;
-    unsigned Idx;
-  };
-
-  ExtractByClass m_ExtractsByClass;
-  ExtractByType m_ExtractsByType;
-  SmallVector<ExtractWithExtendOps> m_ExtractsWithExtends;
-
-public:
-  /// Add an extraction from a specific source and element index.
-  void addExtract(Value *Src, unsigned Idx) {
-    auto *Ty = cast<FixedVectorType>(Src->getType());
-    unsigned NumElts = Ty->getNumElements();
-    if (m_ExtractsByClass.count(Src)) {
-      if (Idx < NumElts)
-        m_ExtractsByClass[Src].setBit(Idx);
-      else
-        m_ExtractsByClass[Src].setAllBits();
-      return;
-    }
-    m_ExtractsByClass[Src] = Idx < NumElts ? APInt::getOneBitSet(NumElts, Idx)
-                                           : APInt::getAllOnes(NumElts);
-  }
-
-  /// Add an extraction from a vector type and specific element index.
-  /// We assume that all extractions from a given type are from the same source.
-  void addExtract(FixedVectorType *VecTy, unsigned Idx) {
-    unsigned NumElts = VecTy->getNumElements();
-    if (m_ExtractsByType.count(VecTy)) {
-      if (Idx < NumElts)
-        m_ExtractsByType[VecTy].setBit(Idx);
-      else
-        m_ExtractsByType[VecTy].setAllBits();
-      return;
-    }
-    m_ExtractsByType[VecTy] = Idx < NumElts ? APInt::getOneBitSet(NumElts, Idx)
-                                            : APInt::getAllOnes(NumElts);
-  }
-
-  /// Add an extended extraction from a specific source and element index.
-  void addExtractWithExtend(unsigned Opcode, Type *SclTy,
-                            VectorType *VecTy,
-                            unsigned Idx) {
-    m_ExtractsWithExtends.push_back({Opcode, VecTy, SclTy, Idx});
-  }
-
-  /// Determine the accumulated scalarization cost for the specified extractions.
-  InstructionCost getCost(const TargetTransformInfo *TTI) {
-    InstructionCost Cost = 0;
-    for (struct ExtractWithExtendOps &It : m_ExtractsWithExtends)
-      Cost +=
-          TTI->getExtractWithExtendCost(It.Opcode, It.SclTy, It.VecTy, It.Idx);
-    for (detail::DenseMapPair<FixedVectorType *, APInt> &It : m_ExtractsByType)
-      Cost += TTI->getScalarizationOverhead(It.first, It.second, false, true);
-    for (detail::DenseMapPair<Value *, APInt> &It : m_ExtractsByClass)
-      Cost += TTI->getScalarizationOverhead(
-          cast<VectorType>(It.first->getType()), It.second, false, true);
-    return Cost;
-  }
-};
-} // anonymous namespace
-
 static std::pair<InstructionCost, InstructionCost>
 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
@@ -6083,9 +6009,9 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
   // FIXME: it tries to fix a problem with MSVC buildbots.
   TargetTransformInfo &TTIRef = *TTI;
-  auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL,
+  auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
                                VectorizedVals, E](InstructionCost &Cost) {
-    ScalarizationOverheadBuilder ScalarizationCost;
+    DenseMap<Value *, int> ExtractVectorsTys;
     SmallPtrSet<Value *, 4> CheckedExtracts;
     for (auto *V : VL) {
       if (isa<UndefValue>(V))
@@ -6106,6 +6032,12 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
       if (!EEIdx)
         continue;
       unsigned Idx = *EEIdx;
+      if (TTIRef.getNumberOfParts(VecTy) !=
+          TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
+        auto It =
+            ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
+        It->getSecond() = std::min<int>(It->second, Idx);
+      }
       // Take credit for instruction that will become dead.
       if (EE->hasOneUse()) {
         Instruction *Ext = EE->user_back();
@@ -6114,9 +6046,9 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
             })) {
           // Use getExtractWithExtendCost() to calculate the cost of
           // extractelement/ext pair.
-          ScalarizationCost.addExtractWithExtend(
-              Ext->getOpcode(), Ext->getType(), EE->getVectorOperandType(),
-              Idx);
+          Cost -=
+              TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
+                                              EE->getVectorOperandType(), Idx);
           // Add back the cost of s|zext which is subtracted separately.
           Cost += TTIRef.getCastInstrCost(
               Ext->getOpcode(), Ext->getType(), EE->getType(),
@@ -6124,9 +6056,36 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
           continue;
         }
       }
-      ScalarizationCost.addExtract(EE->getVectorOperand(), Idx);
+      Cost -= TTIRef.getVectorInstrCost(*EE, EE->getVectorOperandType(), Idx);
+    }
+    // Add a cost for subvector extracts/inserts if required.
+    for (const auto &Data : ExtractVectorsTys) {
+      auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
+      unsigned NumElts = VecTy->getNumElements();
+      if (Data.second % NumElts == 0)
+        continue;
+      if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
+        unsigned Idx = (Data.second / NumElts) * NumElts;
+        unsigned EENumElts = EEVTy->getNumElements();
+        if (Idx + NumElts <= EENumElts) {
+          Cost +=
+              TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
+                                    EEVTy, None, CostKind, Idx, VecTy);
+        } else {
+          // Need to round up the subvector type vectorization factor to avoid a
+          // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
+          // <= EENumElts.
+          auto *SubVT =
+              FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
+          Cost +=
+              TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
+                                    EEVTy, None, CostKind, Idx, SubVT);
+        }
+      } else {
+        Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
+                                      VecTy, None, CostKind, 0, EEVTy);
+      }
     }
-    Cost -= ScalarizationCost.getCost(&TTIRef);
   };
   if (E->State == TreeEntry::NeedToGather) {
     if (allConstant(VL))
@@ -6327,16 +6286,16 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
     case Instruction::ExtractElement: {
       // The common cost of removal ExtractElement/ExtractValue instructions +
       // the cost of shuffles, if required to resuffle the original vector.
-      ScalarizationOverheadBuilder ScalarizationCost, ReuseScalarizationCost;
       if (NeedToShuffleReuses) {
         unsigned Idx = 0;
         for (unsigned I : E->ReuseShuffleIndices) {
           if (ShuffleOrOp == Instruction::ExtractElement) {
             auto *EE = cast<ExtractElementInst>(VL[I]);
-            ReuseScalarizationCost.addExtract(EE->getVectorOperand(),
-                                              *getExtractIndex(EE));
+            CommonCost -= TTI->getVectorInstrCost(
+                *EE, EE->getVectorOperandType(), *getExtractIndex(EE));
           } else {
-            ReuseScalarizationCost.addExtract(VecTy, Idx);
+            CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
+                                                  VecTy, Idx);
             ++Idx;
           }
         }
@@ -6344,18 +6303,16 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
         for (Value *V : VL) {
           if (ShuffleOrOp == Instruction::ExtractElement) {
             auto *EE = cast<ExtractElementInst>(V);
-            ScalarizationCost.addExtract(EE->getVectorOperand(),
-                                         *getExtractIndex(EE));
+            CommonCost += TTI->getVectorInstrCost(
+                *EE, EE->getVectorOperandType(), *getExtractIndex(EE));
           } else {
             --Idx;
-            ScalarizationCost.addExtract(VecTy, Idx);
+            CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
+                                                  VecTy, Idx);
           }
         }
-        CommonCost -= ReuseScalarizationCost.getCost(TTI);
-        CommonCost += ScalarizationCost.getCost(TTI);
       }
       if (ShuffleOrOp == Instruction::ExtractValue) {
-        ScalarizationOverheadBuilder ValueScalarizationCost;
         for (unsigned I = 0, E = VL.size(); I < E; ++I) {
           auto *EI = cast<Instruction>(VL[I]);
           // Take credit for instruction that will become dead.
@@ -6364,20 +6321,20 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
             if (isa<SExtInst, ZExtInst>(Ext) &&
                 all_of(Ext->users(),
                        [](User *U) { return isa<GetElementPtrInst>(U); })) {
-              // Use getExtractWithExtendCost() to calculate the cost of
-              // extractelement/ext pair.
-              ValueScalarizationCost.addExtractWithExtend(
-                  Ext->getOpcode(), Ext->getType(), VecTy, I);
-              // Add back the cost of s|zext which is subtracted separately.
-              CommonCost += TTI->getCastInstrCost(
-                  Ext->getOpcode(), Ext->getType(), EI->getType(),
-                  TTI::getCastContextHint(Ext), CostKind, Ext);
-              continue;
+            // Use getExtractWithExtendCost() to calculate the cost of
+            // extractelement/ext pair.
+            CommonCost -= TTI->getExtractWithExtendCost(
+                Ext->getOpcode(), Ext->getType(), VecTy, I);
+            // Add back the cost of s|zext which is subtracted separately.
+            CommonCost += TTI->getCastInstrCost(
+                Ext->getOpcode(), Ext->getType(), EI->getType(),
+                TTI::getCastContextHint(Ext), CostKind, Ext);
+            continue;
             }
           }
-          ValueScalarizationCost.addExtract(VecTy, I);
+          CommonCost -=
+              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
         }
-        CommonCost -= ValueScalarizationCost.getCost(TTI);
       } else {
         AdjustExtractsCost(CommonCost);
       }
@@ -7277,7 +7234,6 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
   SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks;
   SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers;
   SmallVector<APInt> DemandedElts;
-  ScalarizationOverheadBuilder ScalarizationCost;
   for (ExternalUser &EU : ExternalUses) {
     // We only add extract cost once for the same scalar.
     if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
@@ -7368,20 +7324,20 @@ InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
     // If we plan to rewrite the tree in a smaller type, we will need to sign
     // extend the extracted value back to the original type. Here, we account
     // for the extract and the added cost of the sign extend if needed.
+    auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
     auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
     if (MinBWs.count(ScalarRoot)) {
       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
       auto Extend =
           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
-      auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
-      ScalarizationCost.addExtractWithExtend(Extend, EU.Scalar->getType(),
-                                             VecTy, EU.Lane);
+      VecTy = FixedVectorType::get(MinTy, BundleWidth);
+      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
+                                                   VecTy, EU.Lane);
     } else {
-      auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
-      ScalarizationCost.addExtract(VecTy, EU.Lane);
+      ExtractCost +=
+          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
     }
   }
-  ExtractCost += ScalarizationCost.getCost(TTI);
 
   InstructionCost SpillCost = getSpillCost();
   Cost += SpillCost + ExtractCost;

diff  --git a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
index 4db11b7110c5f..082ccd8100208 100644
--- a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll
@@ -61,14 +61,14 @@ define i32 @main() {
 ; CHECK-NEXT:    store i32 4, i32* [[INPUT_RELOAD_ADDR13_I]], align 4, !noalias !3
 ; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
 ; CHECK-NEXT:    [[FRAMEPTR_I2:%.*]] = load %f.Frame*, %f.Frame** [[TMP2]], align 8, !alias.scope !6
+; CHECK-NEXT:    [[INPUT_RELOAD_ADDR13_I3:%.*]] = getelementptr inbounds [[F_FRAME]], %f.Frame* [[FRAMEPTR_I2]], i64 0, i32 2
+; CHECK-NEXT:    [[INPUT_RELOAD14_I4:%.*]] = load i32, i32* [[INPUT_RELOAD_ADDR13_I3]], align 4, !noalias !6
 ; CHECK-NEXT:    [[N_VAL3_RELOAD_ADDR11_I5:%.*]] = getelementptr inbounds [[F_FRAME]], %f.Frame* [[FRAMEPTR_I2]], i64 0, i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[N_VAL3_RELOAD_ADDR11_I5]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[N_VAL3_RELOAD_ADDR11_I5]], i64 1
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-; CHECK-NEXT:    [[SUM7_I7:%.*]] = add i32 [[TMP5]], [[TMP7]]
+; CHECK-NEXT:    [[N_VAL3_RELOAD12_I6:%.*]] = load i32, i32* [[N_VAL3_RELOAD_ADDR11_I5]], align 4, !noalias !6
+; CHECK-NEXT:    [[SUM7_I7:%.*]] = add i32 [[N_VAL3_RELOAD12_I6]], [[INPUT_RELOAD14_I4]]
 ; CHECK-NEXT:    tail call void @print(i32 [[SUM7_I7]]), !noalias !6
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast %f.Frame* [[FRAMEPTR_I2]] to i8*
-; CHECK-NEXT:    tail call void @deallocate(i8* [[TMP8]]), !noalias !6
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast %f.Frame* [[FRAMEPTR_I2]] to i8*
+; CHECK-NEXT:    tail call void @deallocate(i8* [[TMP5]]), !noalias !6
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:

diff  --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll
index 470823dc7e00e..1ea2c75062a59 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll
@@ -274,6 +274,7 @@ for.end:
 
 ; PR43745 - https://bugs.llvm.org/show_bug.cgi?id=43745
 
+; FIXME: this should be vectorized
 define i1 @cmp_lt_gt(double %a, double %b, double %c) {
 ; CHECK-LABEL: @cmp_lt_gt(
 ; CHECK-NEXT:  entry:
@@ -287,16 +288,17 @@ define i1 @cmp_lt_gt(double %a, double %b, double %c) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[MUL]], i64 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP7:%.*]] = fdiv <2 x double> [[TMP4]], [[TMP6]]
-; CHECK-NEXT:    [[TMP8:%.*]] = fcmp olt <2 x double> [[TMP7]], <double 0x3EB0C6F7A0B5ED8D, double 0x3EB0C6F7A0B5ED8D>
-; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i64 0
-; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i64 1
-; CHECK-NEXT:    [[OR_COND:%.*]] = select i1 [[TMP10]], i1 [[TMP9]], i1 false
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i64 1
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP8]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i64 0
+; CHECK-NEXT:    [[CMP4:%.*]] = fcmp olt double [[TMP9]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[OR_COND:%.*]] = select i1 [[CMP]], i1 [[CMP4]], i1 false
 ; CHECK-NEXT:    br i1 [[OR_COND]], label [[CLEANUP:%.*]], label [[LOR_LHS_FALSE:%.*]]
 ; CHECK:       lor.lhs.false:
-; CHECK-NEXT:    [[TMP11:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
-; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP11]], i64 0
-; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i1> [[TMP11]], i64 1
-; CHECK-NEXT:    [[OR_COND1:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP12]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i1> [[TMP10]], i64 0
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP10]], i64 1
+; CHECK-NEXT:    [[OR_COND1:%.*]] = select i1 [[TMP12]], i1 true, i1 [[TMP11]]
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
 ; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[OR_COND1]], [[LOR_LHS_FALSE]] ]

diff  --git a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
index 833374d9f6104..8125b135e793f 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
@@ -1,56 +1,64 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=arm64-apple-macosx11.0.0 -slp-vectorizer -S < %s | FileCheck %s
 
-; TODO: Test case reported on D134605 where the vectorization was causing a slowdown due to an underestimation in the cost of the extractions.
+; Test case reported on D134605 where the vectorization was causing a slowdown due to an underestimation in the cost of the extractions.
 
 define fastcc i64 @zot(float %arg, float %arg1, float %arg2, float %arg3, float %arg4, ptr %arg5, i1 %arg6, i1 %arg7, i1 %arg8) {
 ; CHECK-LABEL: @zot(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x float> <float 0.000000e+00, float poison>, float [[ARG:%.*]], i32 1
-; CHECK-NEXT:    [[TMP1:%.*]] = fmul fast <2 x float> zeroinitializer, [[TMP0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x float> poison, float [[ARG3:%.*]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> [[TMP2]], float [[ARG3]], i32 1
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <2 x float> [[TMP3]], <float 1.000000e+00, float 1.000000e+00>
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x float> <float poison, float 0.000000e+00>, float [[ARG3]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = fadd fast <2 x float> <float 1.000000e+00, float 0.000000e+00>, [[TMP5]]
-; CHECK-NEXT:    [[TMP7:%.*]] = fadd fast <2 x float> [[TMP6]], <float 2.000000e+00, float 1.000000e+00>
-; CHECK-NEXT:    [[TMP8:%.*]] = fadd fast <2 x float> [[TMP3]], <float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT:    [[VAL:%.*]] = fmul fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT:    [[VAL9:%.*]] = fmul fast float 0.000000e+00, [[ARG:%.*]]
+; CHECK-NEXT:    [[VAL10:%.*]] = fmul fast float [[ARG3:%.*]], 1.000000e+00
+; CHECK-NEXT:    [[VAL11:%.*]] = fmul fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT:    [[VAL12:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT:    [[VAL13:%.*]] = fadd fast float [[VAL12]], 2.000000e+00
+; CHECK-NEXT:    [[VAL14:%.*]] = fadd fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT:    [[VAL15:%.*]] = fadd fast float [[VAL14]], 1.000000e+00
+; CHECK-NEXT:    [[VAL16:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT:    [[VAL17:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
 ; CHECK-NEXT:    br i1 [[ARG6:%.*]], label [[BB18:%.*]], label [[BB57:%.*]]
 ; CHECK:       bb18:
-; CHECK-NEXT:    [[TMP9:%.*]] = phi <2 x float> [ [[TMP8]], [[BB:%.*]] ]
-; CHECK-NEXT:    [[TMP10:%.*]] = phi <2 x float> [ [[TMP7]], [[BB]] ]
-; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x float> [[TMP8]], i32 0
-; CHECK-NEXT:    [[VAL23:%.*]] = fmul fast float [[TMP11]], 2.000000e+00
-; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x float> [[TMP8]], i32 1
-; CHECK-NEXT:    [[VAL24:%.*]] = fmul fast float [[TMP12]], 3.000000e+00
+; CHECK-NEXT:    [[VAL19:%.*]] = phi float [ [[VAL13]], [[BB:%.*]] ]
+; CHECK-NEXT:    [[VAL20:%.*]] = phi float [ [[VAL15]], [[BB]] ]
+; CHECK-NEXT:    [[VAL21:%.*]] = phi float [ [[VAL16]], [[BB]] ]
+; CHECK-NEXT:    [[VAL22:%.*]] = phi float [ [[VAL17]], [[BB]] ]
+; CHECK-NEXT:    [[VAL23:%.*]] = fmul fast float [[VAL16]], 2.000000e+00
+; CHECK-NEXT:    [[VAL24:%.*]] = fmul fast float [[VAL17]], 3.000000e+00
 ; CHECK-NEXT:    br i1 [[ARG7:%.*]], label [[BB25:%.*]], label [[BB57]]
 ; CHECK:       bb25:
-; CHECK-NEXT:    [[TMP13:%.*]] = phi <2 x float> [ [[TMP9]], [[BB18]] ]
-; CHECK-NEXT:    [[TMP14:%.*]] = phi <2 x float> [ [[TMP10]], [[BB18]] ]
+; CHECK-NEXT:    [[VAL26:%.*]] = phi float [ [[VAL19]], [[BB18]] ]
+; CHECK-NEXT:    [[VAL27:%.*]] = phi float [ [[VAL20]], [[BB18]] ]
+; CHECK-NEXT:    [[VAL28:%.*]] = phi float [ [[VAL21]], [[BB18]] ]
+; CHECK-NEXT:    [[VAL29:%.*]] = phi float [ [[VAL22]], [[BB18]] ]
 ; CHECK-NEXT:    br label [[BB30:%.*]]
 ; CHECK:       bb30:
 ; CHECK-NEXT:    [[VAL31:%.*]] = phi float [ [[VAL55:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT:    [[VAL32:%.*]] = phi float [ [[TMP27:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT:    [[VAL38:%.*]] = getelementptr inbounds i8, ptr [[ARG5:%.*]], i64 2
-; CHECK-NEXT:    [[TMP15:%.*]] = load <2 x i8>, ptr [[ARG5]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = uitofp <2 x i8> [[TMP15]] to <2 x float>
-; CHECK-NEXT:    [[TMP17:%.*]] = fsub fast <2 x float> [[TMP16]], [[TMP1]]
-; CHECK-NEXT:    [[TMP18:%.*]] = fmul fast <2 x float> [[TMP17]], [[TMP14]]
-; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x float> [[TMP18]], i32 0
-; CHECK-NEXT:    [[TMP20:%.*]] = extractelement <2 x float> [[TMP18]], i32 1
-; CHECK-NEXT:    [[VAL50:%.*]] = fadd fast float [[TMP20]], [[TMP19]]
-; CHECK-NEXT:    [[TMP21:%.*]] = load <2 x i8>, ptr [[VAL38]], align 1
-; CHECK-NEXT:    [[TMP22:%.*]] = uitofp <2 x i8> [[TMP21]] to <2 x float>
-; CHECK-NEXT:    [[TMP23:%.*]] = fsub fast <2 x float> [[TMP22]], [[TMP4]]
-; CHECK-NEXT:    [[TMP24:%.*]] = fmul fast <2 x float> [[TMP23]], [[TMP13]]
-; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <2 x float> [[TMP24]], i32 0
-; CHECK-NEXT:    [[VAL52:%.*]] = fadd fast float [[VAL50]], [[TMP25]]
-; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <2 x float> [[TMP24]], i32 1
-; CHECK-NEXT:    [[VAL54:%.*]] = fadd fast float [[VAL52]], [[TMP26]]
+; CHECK-NEXT:    [[VAL32:%.*]] = phi float [ [[VAL9]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
+; CHECK-NEXT:    [[VAL33:%.*]] = load i8, ptr [[ARG5:%.*]], align 1
+; CHECK-NEXT:    [[VAL34:%.*]] = uitofp i8 [[VAL33]] to float
+; CHECK-NEXT:    [[VAL35:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 1
+; CHECK-NEXT:    [[VAL36:%.*]] = load i8, ptr [[VAL35]], align 1
+; CHECK-NEXT:    [[VAL37:%.*]] = uitofp i8 [[VAL36]] to float
+; CHECK-NEXT:    [[VAL38:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 2
+; CHECK-NEXT:    [[VAL39:%.*]] = load i8, ptr [[VAL38]], align 1
+; CHECK-NEXT:    [[VAL40:%.*]] = uitofp i8 [[VAL39]] to float
+; CHECK-NEXT:    [[VAL41:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 3
+; CHECK-NEXT:    [[VAL42:%.*]] = load i8, ptr [[VAL41]], align 1
+; CHECK-NEXT:    [[VAL43:%.*]] = uitofp i8 [[VAL42]] to float
+; CHECK-NEXT:    [[VAL44:%.*]] = fsub fast float [[VAL34]], [[VAL]]
+; CHECK-NEXT:    [[VAL45:%.*]] = fsub fast float [[VAL37]], [[VAL9]]
+; CHECK-NEXT:    [[VAL46:%.*]] = fsub fast float [[VAL40]], [[VAL10]]
+; CHECK-NEXT:    [[VAL47:%.*]] = fsub fast float [[VAL43]], [[VAL11]]
+; CHECK-NEXT:    [[VAL48:%.*]] = fmul fast float [[VAL44]], [[VAL26]]
+; CHECK-NEXT:    [[VAL49:%.*]] = fmul fast float [[VAL45]], [[VAL27]]
+; CHECK-NEXT:    [[VAL50:%.*]] = fadd fast float [[VAL49]], [[VAL48]]
+; CHECK-NEXT:    [[VAL51:%.*]] = fmul fast float [[VAL46]], [[VAL28]]
+; CHECK-NEXT:    [[VAL52:%.*]] = fadd fast float [[VAL50]], [[VAL51]]
+; CHECK-NEXT:    [[VAL53:%.*]] = fmul fast float [[VAL47]], [[VAL29]]
+; CHECK-NEXT:    [[VAL54:%.*]] = fadd fast float [[VAL52]], [[VAL53]]
 ; CHECK-NEXT:    [[VAL55]] = tail call fast float @llvm.minnum.f32(float [[VAL31]], float [[ARG1:%.*]])
 ; CHECK-NEXT:    [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[VAL54]])
 ; CHECK-NEXT:    call void @ham(float [[VAL55]], float [[VAL56]])
-; CHECK-NEXT:    [[TMP27]] = extractelement <2 x float> [[TMP1]], i32 1
 ; CHECK-NEXT:    br i1 [[ARG8:%.*]], label [[BB30]], label [[BB57]]
 ; CHECK:       bb57:
 ; CHECK-NEXT:    ret i64 0

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll b/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll
index 7d4242cb0ea14..d5cf4ac62551f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll
@@ -29,19 +29,20 @@ define i64 @bitmask_16xi8(ptr nocapture noundef readonly %src) {
 ; SSE-NEXT:    [[TMP5:%.*]] = icmp eq <4 x i8> [[TMP4]], zeroinitializer
 ; SSE-NEXT:    [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> zeroinitializer, <4 x i64> <i64 512, i64 1024, i64 2048, i64 4096>
 ; SSE-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 13
-; SSE-NEXT:    [[TMP7:%.*]] = load <2 x i8>, ptr [[ARRAYIDX_13]], align 1
-; SSE-NEXT:    [[TMP8:%.*]] = icmp eq <2 x i8> [[TMP7]], zeroinitializer
-; SSE-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
-; SSE-NEXT:    [[OR_13:%.*]] = select i1 [[TMP9]], i64 0, i64 8192
-; SSE-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
-; SSE-NEXT:    [[OR_14:%.*]] = select i1 [[TMP10]], i64 0, i64 16384
+; SSE-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX_13]], align 1
+; SSE-NEXT:    [[TOBOOL_NOT_13:%.*]] = icmp eq i8 [[TMP7]], 0
+; SSE-NEXT:    [[OR_13:%.*]] = select i1 [[TOBOOL_NOT_13]], i64 0, i64 8192
+; SSE-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 14
+; SSE-NEXT:    [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX_14]], align 1
+; SSE-NEXT:    [[TOBOOL_NOT_14:%.*]] = icmp eq i8 [[TMP8]], 0
+; SSE-NEXT:    [[OR_14:%.*]] = select i1 [[TOBOOL_NOT_14]], i64 0, i64 16384
 ; SSE-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 15
-; SSE-NEXT:    [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX_15]], align 1
-; SSE-NEXT:    [[TOBOOL_NOT_15:%.*]] = icmp eq i8 [[TMP11]], 0
+; SSE-NEXT:    [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX_15]], align 1
+; SSE-NEXT:    [[TOBOOL_NOT_15:%.*]] = icmp eq i8 [[TMP9]], 0
 ; SSE-NEXT:    [[OR_15:%.*]] = select i1 [[TOBOOL_NOT_15]], i64 0, i64 32768
-; SSE-NEXT:    [[TMP12:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP3]])
-; SSE-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP6]])
-; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP12]], [[TMP13]]
+; SSE-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP3]])
+; SSE-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP6]])
+; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP10]], [[TMP11]]
 ; SSE-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OP_RDX]], [[OR_13]]
 ; SSE-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OR_14]], [[OR_15]]
 ; SSE-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX1]], [[OP_RDX2]]
@@ -62,19 +63,20 @@ define i64 @bitmask_16xi8(ptr nocapture noundef readonly %src) {
 ; AVX-NEXT:    [[TMP5:%.*]] = icmp eq <4 x i8> [[TMP4]], zeroinitializer
 ; AVX-NEXT:    [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> zeroinitializer, <4 x i64> <i64 512, i64 1024, i64 2048, i64 4096>
 ; AVX-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 13
-; AVX-NEXT:    [[TMP7:%.*]] = load <2 x i8>, ptr [[ARRAYIDX_13]], align 1
-; AVX-NEXT:    [[TMP8:%.*]] = icmp eq <2 x i8> [[TMP7]], zeroinitializer
-; AVX-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
-; AVX-NEXT:    [[OR_13:%.*]] = select i1 [[TMP9]], i64 0, i64 8192
-; AVX-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
-; AVX-NEXT:    [[OR_14:%.*]] = select i1 [[TMP10]], i64 0, i64 16384
+; AVX-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX_13]], align 1
+; AVX-NEXT:    [[TOBOOL_NOT_13:%.*]] = icmp eq i8 [[TMP7]], 0
+; AVX-NEXT:    [[OR_13:%.*]] = select i1 [[TOBOOL_NOT_13]], i64 0, i64 8192
+; AVX-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 14
+; AVX-NEXT:    [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX_14]], align 1
+; AVX-NEXT:    [[TOBOOL_NOT_14:%.*]] = icmp eq i8 [[TMP8]], 0
+; AVX-NEXT:    [[OR_14:%.*]] = select i1 [[TOBOOL_NOT_14]], i64 0, i64 16384
 ; AVX-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 15
-; AVX-NEXT:    [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX_15]], align 1
-; AVX-NEXT:    [[TOBOOL_NOT_15:%.*]] = icmp eq i8 [[TMP11]], 0
+; AVX-NEXT:    [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX_15]], align 1
+; AVX-NEXT:    [[TOBOOL_NOT_15:%.*]] = icmp eq i8 [[TMP9]], 0
 ; AVX-NEXT:    [[OR_15:%.*]] = select i1 [[TOBOOL_NOT_15]], i64 0, i64 32768
-; AVX-NEXT:    [[TMP12:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP3]])
-; AVX-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP6]])
-; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP12]], [[TMP13]]
+; AVX-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP3]])
+; AVX-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP6]])
+; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP10]], [[TMP11]]
 ; AVX-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OP_RDX]], [[OR_13]]
 ; AVX-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OR_14]], [[OR_15]]
 ; AVX-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX1]], [[OP_RDX2]]
@@ -206,18 +208,19 @@ define i64 @bitmask_4xi16(ptr nocapture noundef readonly %src) {
 ; SSE-NEXT:    [[TMP2:%.*]] = icmp eq <4 x i16> [[TMP1]], zeroinitializer
 ; SSE-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> zeroinitializer, <4 x i64> <i64 2, i64 4, i64 8, i64 16>
 ; SSE-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 5
-; SSE-NEXT:    [[TMP4:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_5]], align 2
-; SSE-NEXT:    [[TMP5:%.*]] = icmp eq <2 x i16> [[TMP4]], zeroinitializer
-; SSE-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
-; SSE-NEXT:    [[OR_5:%.*]] = select i1 [[TMP6]], i64 0, i64 32
-; SSE-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
-; SSE-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
+; SSE-NEXT:    [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_5]], align 2
+; SSE-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i16 [[TMP4]], 0
+; SSE-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
+; SSE-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 6
+; SSE-NEXT:    [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX_6]], align 2
+; SSE-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i16 [[TMP5]], 0
+; SSE-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
 ; SSE-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 7
-; SSE-NEXT:    [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2
-; SSE-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i16 [[TMP8]], 0
+; SSE-NEXT:    [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2
+; SSE-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i16 [[TMP6]], 0
 ; SSE-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
-; SSE-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; SSE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; SSE-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; SSE-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; SSE-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]
@@ -233,18 +236,19 @@ define i64 @bitmask_4xi16(ptr nocapture noundef readonly %src) {
 ; AVX-NEXT:    [[TMP2:%.*]] = icmp eq <4 x i16> [[TMP1]], zeroinitializer
 ; AVX-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> zeroinitializer, <4 x i64> <i64 2, i64 4, i64 8, i64 16>
 ; AVX-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 5
-; AVX-NEXT:    [[TMP4:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_5]], align 2
-; AVX-NEXT:    [[TMP5:%.*]] = icmp eq <2 x i16> [[TMP4]], zeroinitializer
-; AVX-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
-; AVX-NEXT:    [[OR_5:%.*]] = select i1 [[TMP6]], i64 0, i64 32
-; AVX-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
-; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
+; AVX-NEXT:    [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_5]], align 2
+; AVX-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i16 [[TMP4]], 0
+; AVX-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
+; AVX-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 6
+; AVX-NEXT:    [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX_6]], align 2
+; AVX-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i16 [[TMP5]], 0
+; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
 ; AVX-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 7
-; AVX-NEXT:    [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2
-; AVX-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i16 [[TMP8]], 0
+; AVX-NEXT:    [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2
+; AVX-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i16 [[TMP6]], 0
 ; AVX-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
-; AVX-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; AVX-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; AVX-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; AVX-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; AVX-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]
@@ -329,18 +333,19 @@ define i64 @bitmask_8xi32(ptr nocapture noundef readonly %src) {
 ; SSE-NEXT:    [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], zeroinitializer
 ; SSE-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> zeroinitializer, <4 x i64> <i64 2, i64 4, i64 8, i64 16>
 ; SSE-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 5
-; SSE-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr [[ARRAYIDX_5]], align 4
-; SSE-NEXT:    [[TMP5:%.*]] = icmp eq <2 x i32> [[TMP4]], zeroinitializer
-; SSE-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
-; SSE-NEXT:    [[OR_5:%.*]] = select i1 [[TMP6]], i64 0, i64 32
-; SSE-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
-; SSE-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
+; SSE-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
+; SSE-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i32 [[TMP4]], 0
+; SSE-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
+; SSE-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 6
+; SSE-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
+; SSE-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i32 [[TMP5]], 0
+; SSE-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
 ; SSE-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 7
-; SSE-NEXT:    [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
-; SSE-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i32 [[TMP8]], 0
+; SSE-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
+; SSE-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i32 [[TMP6]], 0
 ; SSE-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
-; SSE-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; SSE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; SSE-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; SSE-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; SSE-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; SSE-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]
@@ -356,18 +361,19 @@ define i64 @bitmask_8xi32(ptr nocapture noundef readonly %src) {
 ; AVX-NEXT:    [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], zeroinitializer
 ; AVX-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> zeroinitializer, <4 x i64> <i64 2, i64 4, i64 8, i64 16>
 ; AVX-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 5
-; AVX-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr [[ARRAYIDX_5]], align 4
-; AVX-NEXT:    [[TMP5:%.*]] = icmp eq <2 x i32> [[TMP4]], zeroinitializer
-; AVX-NEXT:    [[TMP6:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
-; AVX-NEXT:    [[OR_5:%.*]] = select i1 [[TMP6]], i64 0, i64 32
-; AVX-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
-; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
+; AVX-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX_5]], align 4
+; AVX-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i32 [[TMP4]], 0
+; AVX-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
+; AVX-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 6
+; AVX-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_6]], align 4
+; AVX-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i32 [[TMP5]], 0
+; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
 ; AVX-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 7
-; AVX-NEXT:    [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
-; AVX-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i32 [[TMP8]], 0
+; AVX-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX_7]], align 4
+; AVX-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i32 [[TMP6]], 0
 ; AVX-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
-; AVX-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; AVX-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; AVX-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; AVX-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; AVX-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]
@@ -498,14 +504,15 @@ define i64 @bitmask_8xi64(ptr nocapture noundef readonly %src) {
 ; SSE4-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i64 [[TMP4]], 0
 ; SSE4-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
 ; SSE4-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 6
-; SSE4-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX_6]], align 8
-; SSE4-NEXT:    [[TMP6:%.*]] = icmp eq <2 x i64> [[TMP5]], zeroinitializer
-; SSE4-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP6]], i32 0
-; SSE4-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
-; SSE4-NEXT:    [[TMP8:%.*]] = extractelement <2 x i1> [[TMP6]], i32 1
-; SSE4-NEXT:    [[OR_7:%.*]] = select i1 [[TMP8]], i64 0, i64 128
-; SSE4-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; SSE4-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; SSE4-NEXT:    [[TMP5:%.*]] = load i64, ptr [[ARRAYIDX_6]], align 8
+; SSE4-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i64 [[TMP5]], 0
+; SSE4-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
+; SSE4-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 7
+; SSE4-NEXT:    [[TMP6:%.*]] = load i64, ptr [[ARRAYIDX_7]], align 8
+; SSE4-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i64 [[TMP6]], 0
+; SSE4-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
+; SSE4-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; SSE4-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; SSE4-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; SSE4-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; SSE4-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]
@@ -525,14 +532,15 @@ define i64 @bitmask_8xi64(ptr nocapture noundef readonly %src) {
 ; AVX-NEXT:    [[TOBOOL_NOT_5:%.*]] = icmp eq i64 [[TMP4]], 0
 ; AVX-NEXT:    [[OR_5:%.*]] = select i1 [[TOBOOL_NOT_5]], i64 0, i64 32
 ; AVX-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 6
-; AVX-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr [[ARRAYIDX_6]], align 8
-; AVX-NEXT:    [[TMP6:%.*]] = icmp eq <2 x i64> [[TMP5]], zeroinitializer
-; AVX-NEXT:    [[TMP7:%.*]] = extractelement <2 x i1> [[TMP6]], i32 0
-; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TMP7]], i64 0, i64 64
-; AVX-NEXT:    [[TMP8:%.*]] = extractelement <2 x i1> [[TMP6]], i32 1
-; AVX-NEXT:    [[OR_7:%.*]] = select i1 [[TMP8]], i64 0, i64 128
-; AVX-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
-; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP9]], [[OR_5]]
+; AVX-NEXT:    [[TMP5:%.*]] = load i64, ptr [[ARRAYIDX_6]], align 8
+; AVX-NEXT:    [[TOBOOL_NOT_6:%.*]] = icmp eq i64 [[TMP5]], 0
+; AVX-NEXT:    [[OR_6:%.*]] = select i1 [[TOBOOL_NOT_6]], i64 0, i64 64
+; AVX-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 7
+; AVX-NEXT:    [[TMP6:%.*]] = load i64, ptr [[ARRAYIDX_7]], align 8
+; AVX-NEXT:    [[TOBOOL_NOT_7:%.*]] = icmp eq i64 [[TMP6]], 0
+; AVX-NEXT:    [[OR_7:%.*]] = select i1 [[TOBOOL_NOT_7]], i64 0, i64 128
+; AVX-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP3]])
+; AVX-NEXT:    [[OP_RDX:%.*]] = or i64 [[TMP7]], [[OR_5]]
 ; AVX-NEXT:    [[OP_RDX1:%.*]] = or i64 [[OR_6]], [[OR_7]]
 ; AVX-NEXT:    [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]]
 ; AVX-NEXT:    [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[OR]]

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/c-ray.ll b/llvm/test/Transforms/SLPVectorizer/X86/c-ray.ll
index dfa25011bceac..405d6de7ea76a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/c-ray.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/c-ray.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -S | FileCheck %s
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -slp-vectorizer -S | FileCheck %s
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -slp-vectorizer -S | FileCheck %s
 
 %struct.ray = type { %struct.vec3, %struct.vec3 }
 %struct.vec3 = type { double, double, double }
@@ -9,166 +9,86 @@
 %struct.material = type { %struct.vec3, double, double }
 
 define i32 @ray_sphere(ptr nocapture noundef readonly %sph, ptr nocapture noundef readonly byval(%struct.ray) align 8 %ray, ptr nocapture noundef readnone %sp) {
-; SSE-LABEL: @ray_sphere(
-; SSE-NEXT:  entry:
-; SSE-NEXT:    [[DIR:%.*]] = getelementptr inbounds [[STRUCT_RAY:%.*]], ptr [[RAY:%.*]], i64 0, i32 1
-; SSE-NEXT:    [[TMP0:%.*]] = load double, ptr [[DIR]], align 8
-; SSE-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 1
-; SSE-NEXT:    [[TMP1:%.*]] = load double, ptr [[Y]], align 8
-; SSE-NEXT:    [[MUL6:%.*]] = fmul double [[TMP1]], [[TMP1]]
-; SSE-NEXT:    [[TMP2:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP0]], double [[TMP0]], double [[MUL6]])
-; SSE-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 2
-; SSE-NEXT:    [[TMP3:%.*]] = load double, ptr [[Z]], align 8
-; SSE-NEXT:    [[TMP4:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP3]], double [[TMP3]], double [[TMP2]])
-; SSE-NEXT:    [[MUL:%.*]] = fmul double [[TMP0]], 2.000000e+00
-; SSE-NEXT:    [[TMP5:%.*]] = load double, ptr [[RAY]], align 8
-; SSE-NEXT:    [[TMP6:%.*]] = load double, ptr [[SPH:%.*]], align 8
-; SSE-NEXT:    [[SUB:%.*]] = fsub double [[TMP5]], [[TMP6]]
-; SSE-NEXT:    [[MUL17:%.*]] = fmul double [[TMP1]], 2.000000e+00
-; SSE-NEXT:    [[Y19:%.*]] = getelementptr inbounds [[STRUCT_VEC3:%.*]], ptr [[RAY]], i64 0, i32 1
-; SSE-NEXT:    [[TMP7:%.*]] = load double, ptr [[Y19]], align 8
-; SSE-NEXT:    [[Y21:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 1
-; SSE-NEXT:    [[TMP8:%.*]] = load double, ptr [[Y21]], align 8
-; SSE-NEXT:    [[SUB22:%.*]] = fsub double [[TMP7]], [[TMP8]]
-; SSE-NEXT:    [[MUL23:%.*]] = fmul double [[MUL17]], [[SUB22]]
-; SSE-NEXT:    [[TMP9:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL]], double [[SUB]], double [[MUL23]])
-; SSE-NEXT:    [[MUL26:%.*]] = fmul double [[TMP3]], 2.000000e+00
-; SSE-NEXT:    [[Z28:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[RAY]], i64 0, i32 2
-; SSE-NEXT:    [[TMP10:%.*]] = load double, ptr [[Z28]], align 8
-; SSE-NEXT:    [[Z30:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 2
-; SSE-NEXT:    [[TMP11:%.*]] = load double, ptr [[Z30]], align 8
-; SSE-NEXT:    [[SUB31:%.*]] = fsub double [[TMP10]], [[TMP11]]
-; SSE-NEXT:    [[TMP12:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL26]], double [[SUB31]], double [[TMP9]])
-; SSE-NEXT:    [[MUL42:%.*]] = fmul double [[TMP8]], [[TMP8]]
-; SSE-NEXT:    [[TMP13:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP6]], double [[TMP6]], double [[MUL42]])
-; SSE-NEXT:    [[TMP14:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP11]], double [[TMP11]], double [[TMP13]])
-; SSE-NEXT:    [[TMP15:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP5]], double [[TMP5]], double [[TMP14]])
-; SSE-NEXT:    [[TMP16:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP7]], double [[TMP7]], double [[TMP15]])
-; SSE-NEXT:    [[TMP17:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP10]], double [[TMP10]], double [[TMP16]])
-; SSE-NEXT:    [[FNEG:%.*]] = fneg double [[TMP6]]
-; SSE-NEXT:    [[TMP18:%.*]] = fneg double [[TMP8]]
-; SSE-NEXT:    [[NEG:%.*]] = fmul double [[TMP7]], [[TMP18]]
-; SSE-NEXT:    [[TMP19:%.*]] = tail call double @llvm.fmuladd.f64(double [[FNEG]], double [[TMP5]], double [[NEG]])
-; SSE-NEXT:    [[NEG78:%.*]] = fneg double [[TMP11]]
-; SSE-NEXT:    [[TMP20:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG78]], double [[TMP10]], double [[TMP19]])
-; SSE-NEXT:    [[TMP21:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP20]], double 2.000000e+00, double [[TMP17]])
-; SSE-NEXT:    [[RAD:%.*]] = getelementptr inbounds [[STRUCT_SPHERE:%.*]], ptr [[SPH]], i64 0, i32 1
-; SSE-NEXT:    [[TMP22:%.*]] = load double, ptr [[RAD]], align 8
-; SSE-NEXT:    [[NEG82:%.*]] = fneg double [[TMP22]]
-; SSE-NEXT:    [[TMP23:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG82]], double [[TMP22]], double [[TMP21]])
-; SSE-NEXT:    [[TMP24:%.*]] = fmul double [[TMP4]], -4.000000e+00
-; SSE-NEXT:    [[NEG86:%.*]] = fmul double [[TMP24]], [[TMP23]]
-; SSE-NEXT:    [[TMP25:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP12]], double [[TMP12]], double [[NEG86]])
-; SSE-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP25]], 0.000000e+00
-; SSE-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
-; SSE:       if.end:
-; SSE-NEXT:    [[CALL:%.*]] = tail call double @sqrt(double noundef [[TMP25]])
-; SSE-NEXT:    [[FNEG87:%.*]] = fneg double [[TMP12]]
-; SSE-NEXT:    [[MUL88:%.*]] = fmul double [[TMP4]], 2.000000e+00
-; SSE-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> poison, double [[FNEG87]], i32 0
-; SSE-NEXT:    [[TMP27:%.*]] = insertelement <2 x double> [[TMP26]], double [[CALL]], i32 1
-; SSE-NEXT:    [[TMP28:%.*]] = insertelement <2 x double> poison, double [[CALL]], i32 0
-; SSE-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> [[TMP28]], double [[TMP12]], i32 1
-; SSE-NEXT:    [[TMP30:%.*]] = fsub <2 x double> [[TMP27]], [[TMP29]]
-; SSE-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[MUL88]], i32 0
-; SSE-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[MUL88]], i32 1
-; SSE-NEXT:    [[TMP33:%.*]] = fdiv <2 x double> [[TMP30]], [[TMP32]]
-; SSE-NEXT:    [[TMP34:%.*]] = extractelement <2 x double> [[TMP33]], i32 1
-; SSE-NEXT:    [[CMP93:%.*]] = fcmp olt double [[TMP34]], 0x3EB0C6F7A0B5ED8D
-; SSE-NEXT:    [[TMP35:%.*]] = extractelement <2 x double> [[TMP33]], i32 0
-; SSE-NEXT:    [[CMP94:%.*]] = fcmp olt double [[TMP35]], 0x3EB0C6F7A0B5ED8D
-; SSE-NEXT:    [[OR_COND:%.*]] = select i1 [[CMP93]], i1 [[CMP94]], i1 false
-; SSE-NEXT:    br i1 [[OR_COND]], label [[CLEANUP]], label [[LOR_LHS_FALSE:%.*]]
-; SSE:       lor.lhs.false:
-; SSE-NEXT:    [[TMP36:%.*]] = fcmp ule <2 x double> [[TMP33]], <double 1.000000e+00, double 1.000000e+00>
-; SSE-NEXT:    [[TMP37:%.*]] = extractelement <2 x i1> [[TMP36]], i32 0
-; SSE-NEXT:    [[TMP38:%.*]] = extractelement <2 x i1> [[TMP36]], i32 1
-; SSE-NEXT:    [[OR_COND106:%.*]] = select i1 [[TMP38]], i1 true, i1 [[TMP37]]
-; SSE-NEXT:    [[SPEC_SELECT:%.*]] = zext i1 [[OR_COND106]] to i32
-; SSE-NEXT:    br label [[CLEANUP]]
-; SSE:       cleanup:
-; SSE-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 0, [[IF_END]] ], [ [[SPEC_SELECT]], [[LOR_LHS_FALSE]] ]
-; SSE-NEXT:    ret i32 [[RETVAL_0]]
-;
-; AVX-LABEL: @ray_sphere(
-; AVX-NEXT:  entry:
-; AVX-NEXT:    [[DIR:%.*]] = getelementptr inbounds [[STRUCT_RAY:%.*]], ptr [[RAY:%.*]], i64 0, i32 1
-; AVX-NEXT:    [[TMP0:%.*]] = load double, ptr [[DIR]], align 8
-; AVX-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 1
-; AVX-NEXT:    [[TMP1:%.*]] = load double, ptr [[Y]], align 8
-; AVX-NEXT:    [[MUL6:%.*]] = fmul double [[TMP1]], [[TMP1]]
-; AVX-NEXT:    [[TMP2:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP0]], double [[TMP0]], double [[MUL6]])
-; AVX-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 2
-; AVX-NEXT:    [[TMP3:%.*]] = load double, ptr [[Z]], align 8
-; AVX-NEXT:    [[TMP4:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP3]], double [[TMP3]], double [[TMP2]])
-; AVX-NEXT:    [[MUL:%.*]] = fmul double [[TMP0]], 2.000000e+00
-; AVX-NEXT:    [[TMP5:%.*]] = load double, ptr [[RAY]], align 8
-; AVX-NEXT:    [[TMP6:%.*]] = load double, ptr [[SPH:%.*]], align 8
-; AVX-NEXT:    [[SUB:%.*]] = fsub double [[TMP5]], [[TMP6]]
-; AVX-NEXT:    [[MUL17:%.*]] = fmul double [[TMP1]], 2.000000e+00
-; AVX-NEXT:    [[Y19:%.*]] = getelementptr inbounds [[STRUCT_VEC3:%.*]], ptr [[RAY]], i64 0, i32 1
-; AVX-NEXT:    [[TMP7:%.*]] = load double, ptr [[Y19]], align 8
-; AVX-NEXT:    [[Y21:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 1
-; AVX-NEXT:    [[TMP8:%.*]] = load double, ptr [[Y21]], align 8
-; AVX-NEXT:    [[SUB22:%.*]] = fsub double [[TMP7]], [[TMP8]]
-; AVX-NEXT:    [[MUL23:%.*]] = fmul double [[MUL17]], [[SUB22]]
-; AVX-NEXT:    [[TMP9:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL]], double [[SUB]], double [[MUL23]])
-; AVX-NEXT:    [[MUL26:%.*]] = fmul double [[TMP3]], 2.000000e+00
-; AVX-NEXT:    [[Z28:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[RAY]], i64 0, i32 2
-; AVX-NEXT:    [[TMP10:%.*]] = load double, ptr [[Z28]], align 8
-; AVX-NEXT:    [[Z30:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 2
-; AVX-NEXT:    [[TMP11:%.*]] = load double, ptr [[Z30]], align 8
-; AVX-NEXT:    [[SUB31:%.*]] = fsub double [[TMP10]], [[TMP11]]
-; AVX-NEXT:    [[TMP12:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL26]], double [[SUB31]], double [[TMP9]])
-; AVX-NEXT:    [[MUL42:%.*]] = fmul double [[TMP8]], [[TMP8]]
-; AVX-NEXT:    [[TMP13:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP6]], double [[TMP6]], double [[MUL42]])
-; AVX-NEXT:    [[TMP14:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP11]], double [[TMP11]], double [[TMP13]])
-; AVX-NEXT:    [[TMP15:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP5]], double [[TMP5]], double [[TMP14]])
-; AVX-NEXT:    [[TMP16:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP7]], double [[TMP7]], double [[TMP15]])
-; AVX-NEXT:    [[TMP17:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP10]], double [[TMP10]], double [[TMP16]])
-; AVX-NEXT:    [[FNEG:%.*]] = fneg double [[TMP6]]
-; AVX-NEXT:    [[TMP18:%.*]] = fneg double [[TMP8]]
-; AVX-NEXT:    [[NEG:%.*]] = fmul double [[TMP7]], [[TMP18]]
-; AVX-NEXT:    [[TMP19:%.*]] = tail call double @llvm.fmuladd.f64(double [[FNEG]], double [[TMP5]], double [[NEG]])
-; AVX-NEXT:    [[NEG78:%.*]] = fneg double [[TMP11]]
-; AVX-NEXT:    [[TMP20:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG78]], double [[TMP10]], double [[TMP19]])
-; AVX-NEXT:    [[TMP21:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP20]], double 2.000000e+00, double [[TMP17]])
-; AVX-NEXT:    [[RAD:%.*]] = getelementptr inbounds [[STRUCT_SPHERE:%.*]], ptr [[SPH]], i64 0, i32 1
-; AVX-NEXT:    [[TMP22:%.*]] = load double, ptr [[RAD]], align 8
-; AVX-NEXT:    [[NEG82:%.*]] = fneg double [[TMP22]]
-; AVX-NEXT:    [[TMP23:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG82]], double [[TMP22]], double [[TMP21]])
-; AVX-NEXT:    [[TMP24:%.*]] = fmul double [[TMP4]], -4.000000e+00
-; AVX-NEXT:    [[NEG86:%.*]] = fmul double [[TMP24]], [[TMP23]]
-; AVX-NEXT:    [[TMP25:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP12]], double [[TMP12]], double [[NEG86]])
-; AVX-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP25]], 0.000000e+00
-; AVX-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
-; AVX:       if.end:
-; AVX-NEXT:    [[CALL:%.*]] = tail call double @sqrt(double noundef [[TMP25]])
-; AVX-NEXT:    [[FNEG87:%.*]] = fneg double [[TMP12]]
-; AVX-NEXT:    [[MUL88:%.*]] = fmul double [[TMP4]], 2.000000e+00
-; AVX-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> poison, double [[FNEG87]], i32 0
-; AVX-NEXT:    [[TMP27:%.*]] = insertelement <2 x double> [[TMP26]], double [[CALL]], i32 1
-; AVX-NEXT:    [[TMP28:%.*]] = insertelement <2 x double> poison, double [[CALL]], i32 0
-; AVX-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> [[TMP28]], double [[TMP12]], i32 1
-; AVX-NEXT:    [[TMP30:%.*]] = fsub <2 x double> [[TMP27]], [[TMP29]]
-; AVX-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[MUL88]], i32 0
-; AVX-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[MUL88]], i32 1
-; AVX-NEXT:    [[TMP33:%.*]] = fdiv <2 x double> [[TMP30]], [[TMP32]]
-; AVX-NEXT:    [[TMP34:%.*]] = fcmp olt <2 x double> [[TMP33]], <double 0x3EB0C6F7A0B5ED8D, double 0x3EB0C6F7A0B5ED8D>
-; AVX-NEXT:    [[TMP35:%.*]] = extractelement <2 x i1> [[TMP34]], i32 0
-; AVX-NEXT:    [[TMP36:%.*]] = extractelement <2 x i1> [[TMP34]], i32 1
-; AVX-NEXT:    [[OR_COND:%.*]] = select i1 [[TMP36]], i1 [[TMP35]], i1 false
-; AVX-NEXT:    br i1 [[OR_COND]], label [[CLEANUP]], label [[LOR_LHS_FALSE:%.*]]
-; AVX:       lor.lhs.false:
-; AVX-NEXT:    [[TMP37:%.*]] = fcmp ule <2 x double> [[TMP33]], <double 1.000000e+00, double 1.000000e+00>
-; AVX-NEXT:    [[TMP38:%.*]] = extractelement <2 x i1> [[TMP37]], i32 0
-; AVX-NEXT:    [[TMP39:%.*]] = extractelement <2 x i1> [[TMP37]], i32 1
-; AVX-NEXT:    [[OR_COND106:%.*]] = select i1 [[TMP39]], i1 true, i1 [[TMP38]]
-; AVX-NEXT:    [[SPEC_SELECT:%.*]] = zext i1 [[OR_COND106]] to i32
-; AVX-NEXT:    br label [[CLEANUP]]
-; AVX:       cleanup:
-; AVX-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 0, [[IF_END]] ], [ [[SPEC_SELECT]], [[LOR_LHS_FALSE]] ]
-; AVX-NEXT:    ret i32 [[RETVAL_0]]
+; CHECK-LABEL: @ray_sphere(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[DIR:%.*]] = getelementptr inbounds [[STRUCT_RAY:%.*]], ptr [[RAY:%.*]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[DIR]], align 8
+; CHECK-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[Y]], align 8
+; CHECK-NEXT:    [[MUL6:%.*]] = fmul double [[TMP1]], [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP0]], double [[TMP0]], double [[MUL6]])
+; CHECK-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_RAY]], ptr [[RAY]], i64 0, i32 1, i32 2
+; CHECK-NEXT:    [[TMP3:%.*]] = load double, ptr [[Z]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP3]], double [[TMP3]], double [[TMP2]])
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP0]], 2.000000e+00
+; CHECK-NEXT:    [[TMP5:%.*]] = load double, ptr [[RAY]], align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[SPH:%.*]], align 8
+; CHECK-NEXT:    [[SUB:%.*]] = fsub double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[MUL17:%.*]] = fmul double [[TMP1]], 2.000000e+00
+; CHECK-NEXT:    [[Y19:%.*]] = getelementptr inbounds [[STRUCT_VEC3:%.*]], ptr [[RAY]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[Y19]], align 8
+; CHECK-NEXT:    [[Y21:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load double, ptr [[Y21]], align 8
+; CHECK-NEXT:    [[SUB22:%.*]] = fsub double [[TMP7]], [[TMP8]]
+; CHECK-NEXT:    [[MUL23:%.*]] = fmul double [[MUL17]], [[SUB22]]
+; CHECK-NEXT:    [[TMP9:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL]], double [[SUB]], double [[MUL23]])
+; CHECK-NEXT:    [[MUL26:%.*]] = fmul double [[TMP3]], 2.000000e+00
+; CHECK-NEXT:    [[Z28:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[RAY]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP10:%.*]] = load double, ptr [[Z28]], align 8
+; CHECK-NEXT:    [[Z30:%.*]] = getelementptr inbounds [[STRUCT_VEC3]], ptr [[SPH]], i64 0, i32 2
+; CHECK-NEXT:    [[TMP11:%.*]] = load double, ptr [[Z30]], align 8
+; CHECK-NEXT:    [[SUB31:%.*]] = fsub double [[TMP10]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = tail call double @llvm.fmuladd.f64(double [[MUL26]], double [[SUB31]], double [[TMP9]])
+; CHECK-NEXT:    [[MUL42:%.*]] = fmul double [[TMP8]], [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP6]], double [[TMP6]], double [[MUL42]])
+; CHECK-NEXT:    [[TMP14:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP11]], double [[TMP11]], double [[TMP13]])
+; CHECK-NEXT:    [[TMP15:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP5]], double [[TMP5]], double [[TMP14]])
+; CHECK-NEXT:    [[TMP16:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP7]], double [[TMP7]], double [[TMP15]])
+; CHECK-NEXT:    [[TMP17:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP10]], double [[TMP10]], double [[TMP16]])
+; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[TMP6]]
+; CHECK-NEXT:    [[TMP18:%.*]] = fneg double [[TMP8]]
+; CHECK-NEXT:    [[NEG:%.*]] = fmul double [[TMP7]], [[TMP18]]
+; CHECK-NEXT:    [[TMP19:%.*]] = tail call double @llvm.fmuladd.f64(double [[FNEG]], double [[TMP5]], double [[NEG]])
+; CHECK-NEXT:    [[NEG78:%.*]] = fneg double [[TMP11]]
+; CHECK-NEXT:    [[TMP20:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG78]], double [[TMP10]], double [[TMP19]])
+; CHECK-NEXT:    [[TMP21:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP20]], double 2.000000e+00, double [[TMP17]])
+; CHECK-NEXT:    [[RAD:%.*]] = getelementptr inbounds [[STRUCT_SPHERE:%.*]], ptr [[SPH]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP22:%.*]] = load double, ptr [[RAD]], align 8
+; CHECK-NEXT:    [[NEG82:%.*]] = fneg double [[TMP22]]
+; CHECK-NEXT:    [[TMP23:%.*]] = tail call double @llvm.fmuladd.f64(double [[NEG82]], double [[TMP22]], double [[TMP21]])
+; CHECK-NEXT:    [[TMP24:%.*]] = fmul double [[TMP4]], -4.000000e+00
+; CHECK-NEXT:    [[NEG86:%.*]] = fmul double [[TMP24]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = tail call double @llvm.fmuladd.f64(double [[TMP12]], double [[TMP12]], double [[NEG86]])
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP25]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[CALL:%.*]] = tail call double @sqrt(double noundef [[TMP25]])
+; CHECK-NEXT:    [[FNEG87:%.*]] = fneg double [[TMP12]]
+; CHECK-NEXT:    [[MUL88:%.*]] = fmul double [[TMP4]], 2.000000e+00
+; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <2 x double> poison, double [[FNEG87]], i32 0
+; CHECK-NEXT:    [[TMP27:%.*]] = insertelement <2 x double> [[TMP26]], double [[CALL]], i32 1
+; CHECK-NEXT:    [[TMP28:%.*]] = insertelement <2 x double> poison, double [[CALL]], i32 0
+; CHECK-NEXT:    [[TMP29:%.*]] = insertelement <2 x double> [[TMP28]], double [[TMP12]], i32 1
+; CHECK-NEXT:    [[TMP30:%.*]] = fsub <2 x double> [[TMP27]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = insertelement <2 x double> poison, double [[MUL88]], i32 0
+; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <2 x double> [[TMP31]], double [[MUL88]], i32 1
+; CHECK-NEXT:    [[TMP33:%.*]] = fdiv <2 x double> [[TMP30]], [[TMP32]]
+; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <2 x double> [[TMP33]], i32 1
+; CHECK-NEXT:    [[CMP93:%.*]] = fcmp olt double [[TMP34]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[TMP35:%.*]] = extractelement <2 x double> [[TMP33]], i32 0
+; CHECK-NEXT:    [[CMP94:%.*]] = fcmp olt double [[TMP35]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[OR_COND:%.*]] = select i1 [[CMP93]], i1 [[CMP94]], i1 false
+; CHECK-NEXT:    br i1 [[OR_COND]], label [[CLEANUP]], label [[LOR_LHS_FALSE:%.*]]
+; CHECK:       lor.lhs.false:
+; CHECK-NEXT:    [[TMP36:%.*]] = fcmp ule <2 x double> [[TMP33]], <double 1.000000e+00, double 1.000000e+00>
+; CHECK-NEXT:    [[TMP37:%.*]] = extractelement <2 x i1> [[TMP36]], i32 0
+; CHECK-NEXT:    [[TMP38:%.*]] = extractelement <2 x i1> [[TMP36]], i32 1
+; CHECK-NEXT:    [[OR_COND106:%.*]] = select i1 [[TMP38]], i1 true, i1 [[TMP37]]
+; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = zext i1 [[OR_COND106]] to i32
+; CHECK-NEXT:    br label [[CLEANUP]]
+; CHECK:       cleanup:
+; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 0, [[IF_END]] ], [ [[SPEC_SELECT]], [[LOR_LHS_FALSE]] ]
+; CHECK-NEXT:    ret i32 [[RETVAL_0]]
 ;
 entry:
   %dir = getelementptr inbounds %struct.ray, ptr %ray, i64 0, i32 1

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll
index 3b67d561f4fcf..79b9d62a27441 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll
@@ -4,12 +4,16 @@
 define i32 @crash_reordering_undefs() {
 ; CHECK-LABEL: @crash_reordering_undefs(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OR0:%.*]] = or i64 undef, undef
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp eq i64 undef, [[OR0]]
+; CHECK-NEXT:    [[ADD0:%.*]] = select i1 [[CMP0]], i32 65536, i32 65537
 ; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i64 undef, undef
 ; CHECK-NEXT:    [[ADD2:%.*]] = select i1 [[CMP1]], i32 65536, i32 65537
 ; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i64 undef, undef
 ; CHECK-NEXT:    [[ADD4:%.*]] = select i1 [[CMP2]], i32 65536, i32 65537
-; CHECK-NEXT:    [[ADD0:%.*]] = select i1 undef, i32 65536, i32 65537
-; CHECK-NEXT:    [[ADD9:%.*]] = select i1 undef, i32 65536, i32 65537
+; CHECK-NEXT:    [[OR1:%.*]] = or i64 undef, undef
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i64 undef, [[OR1]]
+; CHECK-NEXT:    [[ADD9:%.*]] = select i1 [[CMP3]], i32 65536, i32 65537
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> undef)
 ; CHECK-NEXT:    [[OP_RDX:%.*]] = add i32 [[TMP0]], undef
 ; CHECK-NEXT:    [[OP_RDX1:%.*]] = add i32 [[ADD0]], [[ADD2]]

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/geps-non-pow-2.ll b/llvm/test/Transforms/SLPVectorizer/X86/geps-non-pow-2.ll
index 162a5beaa64b6..596543880d365 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/geps-non-pow-2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/geps-non-pow-2.ll
@@ -11,37 +11,36 @@ define dso_local i32 @g() local_unnamed_addr {
 ; CHECK-NEXT:    [[TOBOOL_NOT19:%.*]] = icmp eq i32 [[TMP0]], 0
 ; CHECK-NEXT:    br i1 [[TOBOOL_NOT19]], label [[WHILE_END:%.*]], label [[WHILE_BODY:%.*]]
 ; CHECK:       while.body:
-; CHECK-NEXT:    [[A_020:%.*]] = phi i32* [ [[A_020_BE:%.*]], [[WHILE_BODY_BACKEDGE:%.*]] ], [ undef, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = phi <2 x i32*> [ [[TMP15:%.*]], [[WHILE_BODY_BACKEDGE]] ], [ undef, [[ENTRY]] ]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32*> [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; CHECK-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[A_020]], i64 1
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 1, i64 1>
-; CHECK-NEXT:    switch i32 [[TMP4]], label [[WHILE_BODY_BACKEDGE]] [
+; CHECK-NEXT:    [[C_022:%.*]] = phi i32* [ [[C_022_BE:%.*]], [[WHILE_BODY_BACKEDGE:%.*]] ], [ undef, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = phi <2 x i32*> [ [[TMP14:%.*]], [[WHILE_BODY_BACKEDGE]] ], [ undef, [[ENTRY]] ]
+; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[C_022]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint i32* [[C_022]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 1, i64 1>
+; CHECK-NEXT:    switch i32 [[TMP3]], label [[WHILE_BODY_BACKEDGE]] [
 ; CHECK-NEXT:    i32 2, label [[SW_BB:%.*]]
 ; CHECK-NEXT:    i32 4, label [[SW_BB6:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       sw.bb:
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32*> [[TMP5]], i32 1
-; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint i32* [[TMP6]] to i64
-; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
-; CHECK-NEXT:    [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, i32* [[A_020]], i64 2
-; CHECK-NEXT:    store i32 [[TMP8]], i32* [[INCDEC_PTR1]], align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 2, i64 2>
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i32*> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint i32* [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 2, i64 2>
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i32*> [[TMP4]], i32 1
+; CHECK-NEXT:    store i32 [[TMP7]], i32* [[TMP9]], align 4
+; CHECK-NEXT:    [[INCDEC_PTR5:%.*]] = getelementptr inbounds i32, i32* [[C_022]], i64 2
 ; CHECK-NEXT:    br label [[WHILE_BODY_BACKEDGE]]
 ; CHECK:       sw.bb6:
-; CHECK-NEXT:    [[INCDEC_PTR7:%.*]] = getelementptr inbounds i32, i32* [[A_020]], i64 2
-; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i32*> [[TMP5]], i32 0
-; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint i32* [[TMP10]] to i64
-; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 2, i64 2>
-; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <2 x i32*> [[TMP5]], i32 1
-; CHECK-NEXT:    store i32 [[TMP12]], i32* [[TMP14]], align 4
+; CHECK-NEXT:    [[INCDEC_PTR8:%.*]] = getelementptr inbounds i32, i32* [[C_022]], i64 2
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint i32* [[INCDEC_PTR]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i32, <2 x i32*> [[TMP1]], <2 x i64> <i64 2, i64 2>
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i32*> [[TMP4]], i32 0
+; CHECK-NEXT:    store i32 [[TMP11]], i32* [[TMP13]], align 4
 ; CHECK-NEXT:    br label [[WHILE_BODY_BACKEDGE]]
 ; CHECK:       while.body.backedge:
-; CHECK-NEXT:    [[A_020_BE]] = phi i32* [ [[INCDEC_PTR1]], [[WHILE_BODY]] ], [ [[INCDEC_PTR7]], [[SW_BB6]] ], [ [[INCDEC_PTR4]], [[SW_BB]] ]
-; CHECK-NEXT:    [[TMP15]] = phi <2 x i32*> [ [[TMP5]], [[WHILE_BODY]] ], [ [[TMP13]], [[SW_BB6]] ], [ [[TMP9]], [[SW_BB]] ]
+; CHECK-NEXT:    [[C_022_BE]] = phi i32* [ [[INCDEC_PTR]], [[WHILE_BODY]] ], [ [[INCDEC_PTR8]], [[SW_BB6]] ], [ [[INCDEC_PTR5]], [[SW_BB]] ]
+; CHECK-NEXT:    [[TMP14]] = phi <2 x i32*> [ [[TMP4]], [[WHILE_BODY]] ], [ [[TMP12]], [[SW_BB6]] ], [ [[TMP8]], [[SW_BB]] ]
 ; CHECK-NEXT:    br label [[WHILE_BODY]]
 ; CHECK:       while.end:
 ; CHECK-NEXT:    ret i32 undef

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
index 3f0777d9d7498..6307fd1a0543f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -96,16 +96,17 @@ define i1 @fcmp_lt_gt(double %a, double %b, double %c) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[MUL]], i32 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[MUL]], i32 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = fdiv <2 x double> [[TMP4]], [[TMP6]]
-; CHECK-NEXT:    [[TMP8:%.*]] = fcmp olt <2 x double> [[TMP7]], <double 0x3EB0C6F7A0B5ED8D, double 0x3EB0C6F7A0B5ED8D>
-; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
-; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
-; CHECK-NEXT:    [[OR_COND:%.*]] = and i1 [[TMP10]], [[TMP9]]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt double [[TMP8]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT:    [[CMP4:%.*]] = fcmp olt double [[TMP9]], 0x3EB0C6F7A0B5ED8D
+; CHECK-NEXT:    [[OR_COND:%.*]] = and i1 [[CMP]], [[CMP4]]
 ; CHECK-NEXT:    br i1 [[OR_COND]], label [[CLEANUP:%.*]], label [[LOR_LHS_FALSE:%.*]]
 ; CHECK:       lor.lhs.false:
-; CHECK-NEXT:    [[TMP11:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
-; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP11]], i32 0
-; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i1> [[TMP11]], i32 1
-; CHECK-NEXT:    [[NOT_OR_COND9:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fcmp ule <2 x double> [[TMP7]], <double 1.000000e+00, double 1.000000e+00>
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i1> [[TMP10]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP10]], i32 1
+; CHECK-NEXT:    [[NOT_OR_COND9:%.*]] = or i1 [[TMP11]], [[TMP12]]
 ; CHECK-NEXT:    ret i1 [[NOT_OR_COND9]]
 ; CHECK:       cleanup:
 ; CHECK-NEXT:    ret i1 false

