[llvm] 07507cb - [SLP] Fix shuffling of entries of different sizes

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 21 13:10:51 PST 2024


Author: Alexey Bataev
Date: 2024-11-21T13:08:27-08:00
New Revision: 07507cb5919cae0ae880bfee538ebc993b97dd6c

URL: https://github.com/llvm/llvm-project/commit/07507cb5919cae0ae880bfee538ebc993b97dd6c
DIFF: https://github.com/llvm/llvm-project/commit/07507cb5919cae0ae880bfee538ebc993b97dd6c.diff

LOG: [SLP] Fix shuffling of entries of different sizes

The vector factor for the shuffle mask must be chosen based on the
entries' vector factors, not the mask size, to generate correct code.

Fixes #117170
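
To see the indexing problem outside of the LLVM sources, here is a minimal
standalone C++ sketch (illustration only, not code from this commit; the
vector factors and mask values below are made up, and PoisonMaskElem mirrors
LLVM's -1 sentinel). Both operands of a two-source shuffle share the common
vector factor VF, so mask lanes [0, VF) select from the first operand and
[VF, 2*VF) from the second; offsetting re-pointed lanes by the mask size Sz
is only correct when Sz == VF, which no longer holds once entries of
different sizes are combined:

  // Standalone sketch of the mask re-pointing fixed by this commit.
  #include <algorithm>
  #include <cstdio>
  #include <vector>

  constexpr int PoisonMaskElem = -1; // LLVM's "don't care" lane sentinel

  int main() {
    // Two hypothetical entries of different sizes, e.g. a <16 x float>
    // entry and a <2 x float> entry widened to 16 lanes.
    unsigned VF1 = 16, VF2 = 2;
    unsigned VF = std::max(VF1, VF2); // common vector factor: 16

    // A 4-wide common mask whose non-poison lanes were just consumed by
    // a two-source shuffle of the entries above.
    std::vector<int> Mask = {0, PoisonMaskElem, 17, PoisonMaskElem};
    std::vector<int> CommonMask = Mask;

    for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
      if (Mask[Idx] != PoisonMaskElem) {
        // Buggy offset: Idx + Sz gives 4 and 6, which still address the
        // *first* operand's lanes [0, 16), selecting wrong elements.
        // Fixed offset: Idx + VF gives 16 and 18, lanes of the second
        // operand, i.e. the shuffle result just created.
        CommonMask[Idx] = Idx + VF;
      }

    for (int M : CommonMask)
      std::printf("%d ", M); // prints: 16 -1 18 -1
    std::printf("\n");
  }

Both hunks below apply this same correction: the cost estimator and the
instruction builder now offset the re-pointed lanes by the maximum of the
entries' vector factors instead of the mask size.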

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/entries-shuffled-diff-sizes.ll

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index a9481c44e44bb8..c0cb9f04de6cf9 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -10130,6 +10130,9 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
                             InVectors.size() == 1 ? nullptr : InVectors.back(),
                             CommonMask);
       transformMaskAfterShuffle(CommonMask, CommonMask);
+    } else if (InVectors.size() == 2) {
+      Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
+      transformMaskAfterShuffle(CommonMask, CommonMask);
     }
     SameNodesEstimated = false;
     if (!E2 && InVectors.size() == 1) {
@@ -10147,8 +10150,14 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
       Cost += createShuffle(InVectors.front(), &E1, CommonMask);
       transformMaskAfterShuffle(CommonMask, CommonMask);
     } else {
+      auto P = InVectors.front();
       Cost += createShuffle(&E1, E2, Mask);
-      transformMaskAfterShuffle(CommonMask, Mask);
+      unsigned VF = std::max(E1.getVectorFactor(), E2->getVectorFactor());
+      for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
+        if (Mask[Idx] != PoisonMaskElem)
+          CommonMask[Idx] = Idx + (InVectors.empty() ? 0 : VF);
+      Cost += createShuffle(P, InVectors.front(), CommonMask);
+      transformMaskAfterShuffle(CommonMask, CommonMask);
     }
   }
 
@@ -14007,9 +14016,10 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
       transformMaskAfterShuffle(CommonMask, CommonMask);
     }
     V1 = createShuffle(V1, V2, Mask);
+    unsigned VF = std::max(getVF(V1), getVF(Vec));
     for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
       if (Mask[Idx] != PoisonMaskElem)
-        CommonMask[Idx] = Idx + Sz;
+        CommonMask[Idx] = Idx + VF;
     InVectors.front() = Vec;
     if (InVectors.size() == 2)
       InVectors.back() = V1;

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/entries-shuffled-diff-sizes.ll b/llvm/test/Transforms/SLPVectorizer/X86/entries-shuffled-diff-sizes.ll
new file mode 100644
index 00000000000000..aa9195f8c48cec
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/entries-shuffled-diff-sizes.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux -mattr=+avx512vl < %s | FileCheck %s
+
+@GLOB = external global [16000 x i8], align 32
+
+define void @test() {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ALLOCA_0:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <16 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1208), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1612), align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT:    [[GEPLOAD1612:%.*]] = extractelement <16 x float> [[TMP4]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <16 x float> [[TMP0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul reassoc ninf nsz arcp contract afn float [[GEPLOAD1612]], [[TMP1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul reassoc ninf nsz arcp contract afn <16 x float> [[TMP4]], [[TMP0]]
+; CHECK-NEXT:    store <16 x float> [[TMP6]], ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2928), align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1272), align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = load <2 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1288), align 16
+; CHECK-NEXT:    [[TMP9:%.*]] = load <2 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1296), align 16
+; CHECK-NEXT:    [[TMP13:%.*]] = load <8 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1304), align 16
+; CHECK-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1620), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x float> [[TMP11]], <2 x float> [[TMP8]], <16 x i32> <i32 poison, i32 0, i32 2, i32 1, i32 0, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <16 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <16 x float> [[TMP19]], <16 x float> [[TMP12]], <16 x i32> <i32 1, i32 1, i32 17, i32 17, i32 18, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 19, i32 19, i32 19, i32 19>
+; CHECK-NEXT:    [[TMP15:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v8f32(<16 x float> [[TMP12]], <8 x float> [[TMP13]], i64 8)
+; CHECK-NEXT:    [[TMP16:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v4f32(<16 x float> [[TMP15]], <4 x float> [[TMP7]], i64 0)
+; CHECK-NEXT:    [[TMP17:%.*]] = call <16 x float> @llvm.vector.insert.v16f32.v2f32(<16 x float> [[TMP16]], <2 x float> [[TMP9]], i64 6)
+; CHECK-NEXT:    [[TMP18:%.*]] = fmul reassoc ninf nsz arcp contract afn <16 x float> [[TMP14]], [[TMP17]]
+; CHECK-NEXT:    store <16 x float> [[TMP18]], ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2992), align 16
+; CHECK-NEXT:    ret void
+;
+alloca_0:
+  %gepload1208 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1208), align 4
+  %gepload1212 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1212), align 4
+  %gepload1216 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1216), align 4
+  %gepload1220 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1220), align 4
+  %gepload1224 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1224), align 4
+  %gepload1228 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1228), align 4
+  %gepload1232 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1232), align 4
+  %gepload1236 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1236), align 4
+  %gepload1612 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1612), align 4
+  %0 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1208
+  %1 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1208
+  store float %1, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2928), align 16
+  %2 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1212
+  store float %2, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2932), align 4
+  %3 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1216
+  store float %3, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2936), align 8
+  %4 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1220
+  store float %4, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2940), align 4
+  %5 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1224
+  store float %5, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2944), align 32
+  %6 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1228
+  store float %6, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2948), align 4
+  %7 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1232
+  store float %7, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2952), align 8
+  %8 = fmul reassoc ninf nsz arcp contract afn float %gepload1612, %gepload1236
+  store float %8, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2956), align 4
+  %gepload1240 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1240), align 16
+  %gepload1244 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1244), align 16
+  %gepload1248 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1248), align 16
+  %gepload1252 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1252), align 16
+  %gepload1256 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1256), align 16
+  %gepload1260 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1260), align 16
+  %gepload1264 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1264), align 16
+  %gepload1268 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1268), align 16
+  %gepload1272 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1272), align 16
+  %gepload1276 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1276), align 16
+  %gepload1616 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1616), align 16
+  %9 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1240
+  store float %9, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2960), align 16
+  %10 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1244
+  store float %10, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2964), align 4
+  %11 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1248
+  store float %11, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2968), align 8
+  %12 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1252
+  store float %12, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2972), align 4
+  %13 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1256
+  store float %13, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2976), align 32
+  %14 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1260
+  store float %14, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2980), align 4
+  %15 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1264
+  store float %15, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2984), align 8
+  %16 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1268
+  store float %16, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2988), align 4
+  %17 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1272
+  store float %17, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2992), align 16
+  %18 = fmul reassoc ninf nsz arcp contract afn float %gepload1616, %gepload1276
+  store float %18, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 2996), align 4
+  %gepload1280 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1280), align 16
+  %gepload1284 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1284), align 16
+  %gepload1288 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1288), align 16
+  %gepload1292 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1292), align 16
+  %gepload1296 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1296), align 16
+  %gepload1300 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1300), align 16
+  %gepload1304 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1304), align 16
+  %gepload1308 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1308), align 16
+  %gepload1312 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1312), align 16
+  %gepload1316 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1316), align 16
+  %gepload1620 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1620), align 4
+  %19 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1280
+  store float %19, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3000), align 8
+  %20 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1284
+  store float %20, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3004), align 4
+  %21 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1288
+  store float %21, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3008), align 32
+  %22 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1292
+  store float %22, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3012), align 4
+  %23 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1296
+  store float %23, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3016), align 8
+  %24 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1300
+  store float %24, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3020), align 4
+  %25 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1304
+  store float %25, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3024), align 16
+  %26 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1308
+  store float %26, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3028), align 4
+  %27 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1312
+  store float %27, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3032), align 8
+  %28 = fmul reassoc ninf nsz arcp contract afn float %gepload1620, %gepload1316
+  store float %28, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3036), align 4
+  %gepload1320 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1320), align 16
+  %gepload1324 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1324), align 16
+  %gepload1328 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1328), align 16
+  %gepload1332 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1332), align 16
+  %gepload1624 = load float, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 1624), align 8
+  %29 = fmul reassoc ninf nsz arcp contract afn float %gepload1624, %gepload1320
+  store float %29, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3040), align 32
+  %30 = fmul reassoc ninf nsz arcp contract afn float %gepload1624, %gepload1324
+  store float %30, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3044), align 4
+  %31 = fmul reassoc ninf nsz arcp contract afn float %gepload1624, %gepload1328
+  store float %31, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3048), align 8
+  %32 = fmul reassoc ninf nsz arcp contract afn float %gepload1624, %gepload1332
+  store float %32, ptr getelementptr ([16000 x i8], ptr @GLOB, i64 0, i64 3052), align 4
+  ret void
+}

diff  --git a/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll b/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll
index 2253c70dc25015..0d7b26e0299e63 100644
--- a/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/shuffle-multivector.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-163 | FileCheck %s %}
-; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=aarch64-unknown-linux -slp-threshold=-163 | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=x86_64-unknown-linux -slp-threshold=-165 | FileCheck %s %}
+; RUN: %if aarch64-registered-target %{ opt -passes=slp-vectorizer -S < %s -mtriple=aarch64-unknown-linux -slp-threshold=-165 | FileCheck %s %}
 
 define void @test1(i128 %p0, i128 %p1, i128 %p2, i128 %p3, <4 x i128> %vec) {
 ; CHECK-LABEL: @test1(
