[llvm] 65fc992 - [SLP]Early exit out of the reordering if shuffled/perfect diamond match found.

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 16 11:10:44 PST 2021


Author: Alexey Bataev
Date: 2021-12-16T11:09:49-08:00
New Revision: 65fc992579906886241043b104ed9e704c3e7486

URL: https://github.com/llvm/llvm-project/commit/65fc992579906886241043b104ed9e704c3e7486
DIFF: https://github.com/llvm/llvm-project/commit/65fc992579906886241043b104ed9e704c3e7486.diff

LOG: [SLP]Early exit out of the reordering if shuffled/perfect diamond match found.

Need to exit early out of the reordering process if a perfect or shuffled diamond match is found in the operands. Such a pattern would otherwise result in unprofitable reordering because of the (false positive) external uses of the scalars.

Differential Revision: https://reviews.llvm.org/D115811
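
For intuition, here is a minimal standalone C++ sketch of the new check (not the in-tree code; Value, skipReordering, and the std containers are stand-ins for the LLVM types used in the patch): reordering is skipped when every operand row reuses the value set of the first row, while broadcast-like sets of two unique values and non-power-of-2 counts are conservatively rejected.

  #include <cstddef>
  #include <set>
  #include <vector>

  struct Value {}; // opaque stand-in for llvm::Value

  // Returns true when all operand rows draw from the same value set as the
  // first row (a perfect or shuffled diamond match), so reordering would only
  // add (false positive) external-use cost and can be skipped.
  static bool skipReordering(const std::vector<std::vector<Value *>> &OpsVec) {
    std::set<Value *> UniqueValues(OpsVec.front().begin(),
                                   OpsVec.front().end());
    for (std::size_t I = 1, E = OpsVec.size(); I != E; ++I)
      for (Value *V : OpsVec[I])
        if (!UniqueValues.count(V))
          return false; // some lane uses a value outside the first row's set
    std::size_t N = UniqueValues.size();
    // Reject possible broadcasts (two unique values) and non-power-of-2
    // counts, mirroring the TODO in the patch.
    return N != 0 && N != 2 && (N & (N - 1)) == 0;
  }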

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 11e8275df9103..b18ddba3088b4 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1648,11 +1648,37 @@ class BoUpSLP {
           ReorderingModes[OpIdx] = ReorderingMode::Failed;
       }
 
+      // Check that we don't have the same operands. There is no need to
+      // reorder if the operands are just a perfect or shuffled diamond match.
+      // Do not skip the reordering for possible broadcasts or a non-power-of-2
+      // number of scalars (just for now).
+      auto &&SkipReordering = [this]() {
+        SmallPtrSet<Value *, 4> UniqueValues;
+        ArrayRef<OperandData> Op0 = OpsVec.front();
+        for (const OperandData &Data : Op0)
+          UniqueValues.insert(Data.V);
+        for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
+          if (any_of(Op, [&UniqueValues](const OperandData &Data) {
+                return !UniqueValues.contains(Data.V);
+              }))
+            return false;
+        }
+        // TODO: Check if we can remove the check for a non-power-of-2 number
+        // of scalars once non-power-of-2 vectorization is fully supported.
+        return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
+      };
+
       // If the initial strategy fails for any of the operand indexes, then we
       // perform reordering again in a second pass. This helps avoid assigning
       // high priority to the failed strategy, and should improve reordering for
       // the non-failed operand indexes.
       for (int Pass = 0; Pass != 2; ++Pass) {
+        // Check if there is no need to reorder the operands because they are
+        // a perfect or shuffled diamond match.
+        // This is needed to avoid counting extra external-use cost for
+        // shuffled matches, which may cause regressions.
+        if (SkipReordering())
+          break;
         // Skip the second pass if the first pass did not fail.
         bool StrategyFailed = false;
         // Mark all operand data as free to use.
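
A quick usage sketch of the predicate above (hypothetical values; mirrors the behavior of the SkipReordering lambda):

  // e.g., exercised from a small driver:
  Value A, B, C, D, E;
  skipReordering({{&A, &B, &C, &D}, {&B, &A, &D, &C}}); // true: shuffled diamond
  skipReordering({{&A, &B, &C, &D}, {&B, &A, &D, &E}}); // false: E is outside row 0's set
  skipReordering({{&A, &B, &A, &B}, {&B, &A, &B, &A}}); // false: 2 unique values (possible broadcast)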

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
index 42ac6cb0c9149..5b00b2e044a57 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
@@ -4,45 +4,36 @@
 define void @test() {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* undef, i64 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[TMP1]], align 1
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = sub nsw i32 0, [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = shl nsw i32 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = add nsw i32 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, i8* undef, i64 5
-; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP7]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = zext i8 [[TMP8]] to i32
-; CHECK-NEXT:    [[TMP10:%.*]] = sub nsw i32 0, [[TMP9]]
-; CHECK-NEXT:    [[TMP11:%.*]] = shl nsw i32 [[TMP10]], 0
-; CHECK-NEXT:    [[TMP12:%.*]] = add nsw i32 [[TMP11]], 0
-; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, i8* undef, i64 6
-; CHECK-NEXT:    [[TMP14:%.*]] = load i8, i8* [[TMP13]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = zext i8 [[TMP14]] to i32
-; CHECK-NEXT:    [[TMP16:%.*]] = sub nsw i32 0, [[TMP15]]
-; CHECK-NEXT:    [[TMP17:%.*]] = shl nsw i32 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = add nsw i32 [[TMP17]], 0
-; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds i8, i8* undef, i64 7
-; CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[TMP19]], align 1
-; CHECK-NEXT:    [[TMP21:%.*]] = zext i8 [[TMP20]] to i32
-; CHECK-NEXT:    [[TMP22:%.*]] = sub nsw i32 0, [[TMP21]]
-; CHECK-NEXT:    [[TMP23:%.*]] = shl nsw i32 [[TMP22]], 0
-; CHECK-NEXT:    [[TMP24:%.*]] = add nsw i32 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = add nsw i32 [[TMP12]], [[TMP6]]
-; CHECK-NEXT:    [[TMP26:%.*]] = sub nsw i32 [[TMP6]], [[TMP12]]
-; CHECK-NEXT:    [[TMP27:%.*]] = add nsw i32 [[TMP24]], [[TMP18]]
-; CHECK-NEXT:    [[TMP28:%.*]] = sub nsw i32 [[TMP18]], [[TMP24]]
-; CHECK-NEXT:    [[TMP29:%.*]] = add nsw i32 0, [[TMP25]]
-; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 0
-; CHECK-NEXT:    store i32 [[TMP29]], i32* [[TMP30]], align 16
-; CHECK-NEXT:    [[TMP31:%.*]] = sub nsw i32 0, [[TMP27]]
-; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 2
-; CHECK-NEXT:    store i32 [[TMP31]], i32* [[TMP32]], align 8
-; CHECK-NEXT:    [[TMP33:%.*]] = add nsw i32 0, [[TMP26]]
-; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 1
-; CHECK-NEXT:    store i32 [[TMP33]], i32* [[TMP34]], align 4
-; CHECK-NEXT:    [[TMP35:%.*]] = sub nsw i32 0, [[TMP28]]
-; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 3
-; CHECK-NEXT:    store i32 [[TMP35]], i32* [[TMP36]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, i8* undef, i64 5
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* undef, i64 6
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* undef, i64 7
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i8>, <4 x i8>* [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP7:%.*]] = zext <4 x i8> [[TMP6]] to <4 x i32>
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    [[TMP8:%.*]] = sub nsw <4 x i32> zeroinitializer, [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP9:%.*]] = shl nsw <4 x i32> [[TMP8]], zeroinitializer
+; CHECK-NEXT:    [[TMP10:%.*]] = add nsw <4 x i32> [[TMP9]], zeroinitializer
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x i32> [[TMP10]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP11]], i32 0
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x i32> [[TMP10]], i32 0
+; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP13]], i32 1
+; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP10]], i32 3
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP15]], i32 2
+; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <4 x i32> [[TMP10]], i32 2
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <4 x i32> [[TMP16]], i32 [[TMP17]], i32 3
+; CHECK-NEXT:    [[TMP19:%.*]] = add nsw <4 x i32> [[TMP10]], [[TMP18]]
+; CHECK-NEXT:    [[TMP20:%.*]] = sub nsw <4 x i32> [[TMP10]], [[TMP18]]
+; CHECK-NEXT:    [[TMP21:%.*]] = shufflevector <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 0
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 2
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 1
+; CHECK-NEXT:    [[TMP25:%.*]] = add nsw <4 x i32> zeroinitializer, [[TMP21]]
+; CHECK-NEXT:    [[TMP26:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP21]]
+; CHECK-NEXT:    [[TMP27:%.*]] = shufflevector <4 x i32> [[TMP25]], <4 x i32> [[TMP26]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* undef, i64 0, i64 1, i64 3
+; CHECK-NEXT:    [[TMP29:%.*]] = bitcast i32* [[TMP22]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP27]], <4 x i32>* [[TMP29]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %1 = getelementptr inbounds i8, i8* undef, i64 4




More information about the llvm-commits mailing list