[llvm] b74e09c - [SLP]Check for the whole vector vectorization in unique scalars analysis

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 3 06:28:39 PDT 2024


Author: Alexey Bataev
Date: 2024-09-03T06:19:21-07:00
New Revision: b74e09cb20e6218320013b54c9ba2f5c069d44b9

URL: https://github.com/llvm/llvm-project/commit/b74e09cb20e6218320013b54c9ba2f5c069d44b9
DIFF: https://github.com/llvm/llvm-project/commit/b74e09cb20e6218320013b54c9ba2f5c069d44b9.diff

LOG: [SLP]Check for the whole vector vectorization in unique scalars analysis

Need to check that the whole number of registers is being vectorized
before actually trying to build the node, to avoid a compiler
crash.

Added: 
    llvm/test/Transforms/SLPVectorizer/RISCV/unique-loads-insert-non-power-of-2.ll

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index f58803fc56a2d7..93e7bfcdd87c44 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -409,7 +409,7 @@ static bool isVectorLikeInstWithConstOps(Value *V) {
 /// total number of elements \p Size and number of registers (parts) \p
 /// NumParts.
 static unsigned getPartNumElems(unsigned Size, unsigned NumParts) {
-  return PowerOf2Ceil(divideCeil(Size, NumParts));
+  return std::min<unsigned>(Size, PowerOf2Ceil(divideCeil(Size, NumParts)));
 }
 
 /// Returns correct remaining number of elements, considering total amount \p
@@ -7022,7 +7022,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         UniqueValues.emplace_back(V);
     }
     size_t NumUniqueScalarValues = UniqueValues.size();
-    if (NumUniqueScalarValues == VL.size()) {
+    bool IsFullVectors =
+        hasFullVectorsOnly(*TTI, UniqueValues.front()->getType(),
+                           NumUniqueScalarValues);
+    if (NumUniqueScalarValues == VL.size() &&
+        (VectorizeNonPowerOf2 || IsFullVectors)) {
       ReuseShuffleIndices.clear();
     } else {
       // FIXME: Reshuffing scalars is not supported yet for non-power-of-2 ops.
@@ -7033,14 +7037,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         return false;
       }
       LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
-      if (NumUniqueScalarValues <= 1 ||
-          (UniquePositions.size() == 1 && all_of(UniqueValues,
-                                                 [](Value *V) {
-                                                   return isa<UndefValue>(V) ||
-                                                          !isConstant(V);
-                                                 })) ||
-          !hasFullVectorsOnly(*TTI, UniqueValues.front()->getType(),
-                              NumUniqueScalarValues)) {
+      if (NumUniqueScalarValues <= 1 || !IsFullVectors ||
+          (UniquePositions.size() == 1 && all_of(UniqueValues, [](Value *V) {
+             return isa<UndefValue>(V) || !isConstant(V);
+           }))) {
         if (DoNotFail && UniquePositions.size() > 1 &&
             NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() &&
             all_of(UniqueValues, [=](Value *V) {
@@ -9144,9 +9144,6 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
       return nullptr;
     Value *VecBase = nullptr;
     ArrayRef<Value *> VL = E->Scalars;
-    // If the resulting type is scalarized, do not adjust the cost.
-    if (NumParts == VL.size())
-      return nullptr;
     // Check if it can be considered reused if same extractelements were
     // vectorized already.
     bool PrevNodeFound = any_of(
@@ -9799,7 +9796,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
       InsertMask[Idx] = I + 1;
     }
     unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
-    if (NumOfParts > 0)
+    if (NumOfParts > 0 && NumOfParts < NumElts)
       VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
     unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
                      VecScalarsSz;

diff  --git a/llvm/test/Transforms/SLPVectorizer/RISCV/unique-loads-insert-non-power-of-2.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/unique-loads-insert-non-power-of-2.ll
new file mode 100644
index 00000000000000..595293803ca859
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/unique-loads-insert-non-power-of-2.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v -slp-threshold=-10 < %s | FileCheck %s
+
+define void @test(ptr %agg.result) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[AGG_RESULT:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[ARRAYIDX_I39_1:%.*]] = getelementptr i8, ptr [[AGG_RESULT]], i64 8
+; CHECK-NEXT:    [[ARRAYIDX_I39_2:%.*]] = getelementptr i8, ptr [[AGG_RESULT]], i64 16
+; CHECK-NEXT:    [[ADD_PTR_I41_1_1_1:%.*]] = getelementptr i8, ptr null, i64 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[ADD_PTR_I41_1_1_1]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr null, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load double, ptr null, align 8
+; CHECK-NEXT:    [[MUL_1:%.*]] = fmul double [[TMP2]], 0.000000e+00
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x double> poison, double [[TMP0]], i32 2
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> [[TMP4]], <4 x i32> <i32 poison, i32 poison, i32 2, i32 5>
+; CHECK-NEXT:    [[TMP6:%.*]] = call <4 x double> @llvm.vector.insert.v4f64.v2f64(<4 x double> [[TMP5]], <2 x double> [[TMP1]], i64 0)
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], <double 0.000000e+00, double 0.000000e+00, double 1.000000e+00, double 0.000000e+00>
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul <4 x double> zeroinitializer, [[TMP7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x double> [[TMP8]], i32 1
+; CHECK-NEXT:    store double [[TMP9]], ptr [[ARRAYIDX_I39_1]], align 8
+; CHECK-NEXT:    store <4 x double> [[TMP8]], ptr [[ARRAYIDX_I39_2]], align 8
+; CHECK-NEXT:    [[ARRAYIDX_I37_2:%.*]] = getelementptr i8, ptr [[AGG_RESULT]], i64 48
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP8]], <4 x double> poison, <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    store <2 x double> [[TMP10]], ptr [[ARRAYIDX_I37_2]], align 8
+; CHECK-NEXT:    [[ARRAYIDX_I39_2_2:%.*]] = getelementptr i8, ptr [[AGG_RESULT]], i64 64
+; CHECK-NEXT:    [[MUL_1_2_2:%.*]] = fmul double 1.000000e+00, 0.000000e+00
+; CHECK-NEXT:    [[MUL_2_2_2:%.*]] = fmul double 0.000000e+00, [[MUL_1_2_2]]
+; CHECK-NEXT:    store double [[MUL_2_2_2]], ptr [[ARRAYIDX_I39_2_2]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load double, ptr null, align 8
+  %mul.1 = fmul double %0, 0.000000e+00
+  %arrayidx.i39.1 = getelementptr i8, ptr %agg.result, i64 8
+  %add.ptr.i41.1.1 = getelementptr i8, ptr null, i64 8
+  %1 = load double, ptr %add.ptr.i41.1.1, align 8
+  %mul.1.1 = fmul double %1, 0.000000e+00
+  %mul.2.1 = fmul double 0.000000e+00, %mul.1.1
+  store double %mul.2.1, ptr %arrayidx.i39.1, align 8
+  %arrayidx.i39.2 = getelementptr i8, ptr %agg.result, i64 16
+  %mul.1.2 = fmul double %0, 0.000000e+00
+  %mul.2.2 = fmul double 0.000000e+00, %mul.1.2
+  store double %mul.2.2, ptr %arrayidx.i39.2, align 8
+  %arrayidx.i37.1 = getelementptr i8, ptr %agg.result, i64 24
+  store double %mul.2.1, ptr %arrayidx.i37.1, align 8
+  %arrayidx.i39.1.1 = getelementptr i8, ptr %agg.result, i64 32
+  %add.ptr.i41.1.1.1 = getelementptr i8, ptr null, i64 16
+  %2 = load double, ptr %add.ptr.i41.1.1.1, align 8
+  %mul.1.1.1 = fmul double %2, 1.000000e+00
+  %mul.2.1.1 = fmul double 0.000000e+00, %mul.1.1.1
+  store double %mul.2.1.1, ptr %arrayidx.i39.1.1, align 8
+  %arrayidx.i39.2.1 = getelementptr i8, ptr %agg.result, i64 40
+  %mul.1.2.1 = fmul double %1, 0.000000e+00
+  %mul.2.2.1 = fmul double 0.000000e+00, %mul.1.2.1
+  store double %mul.2.2.1, ptr %arrayidx.i39.2.1, align 8
+  %arrayidx.i37.2 = getelementptr i8, ptr %agg.result, i64 48
+  store double %mul.2.2, ptr %arrayidx.i37.2, align 8
+  %arrayidx.i39.1.2 = getelementptr i8, ptr %agg.result, i64 56
+  store double %mul.2.2.1, ptr %arrayidx.i39.1.2, align 8
+  %arrayidx.i39.2.2 = getelementptr i8, ptr %agg.result, i64 64
+  %mul.1.2.2 = fmul double 1.000000e+00, 0.000000e+00
+  %mul.2.2.2 = fmul double 0.000000e+00, %mul.1.2.2
+  store double %mul.2.2.2, ptr %arrayidx.i39.2.2, align 8
+  ret void
+}


        


More information about the llvm-commits mailing list