[llvm] aa00fb0 - [LV] Use umax(VF * UF, MinProfTC) for scalable vectors.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 15 10:23:32 PDT 2022


Author: Florian Hahn
Date: 2022-07-15T10:23:14-07:00
New Revision: aa00fb02c98ad702b3b5cd4dc95025ebbe70acb3

URL: https://github.com/llvm/llvm-project/commit/aa00fb02c98ad702b3b5cd4dc95025ebbe70acb3
DIFF: https://github.com/llvm/llvm-project/commit/aa00fb02c98ad702b3b5cd4dc95025ebbe70acb3.diff

LOG: [LV] Use umax(VF * UF, MinProfTC) for scalable vectors.

For scalable vectors, it is not sufficient to check only
MinProfitableTripCount when it is >= VF.getKnownMinValue() * UF, because
this property may not hold for larger values of vscale. In those cases,
compute umax(VF * UF, MinProfTC) instead.

This should fix
https://lab.llvm.org/buildbot/#/builders/197/builds/2262
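To make the new threshold logic concrete, here is a minimal standalone sketch
(plain C++, not the actual IRBuilder-based implementation) using the numbers
from the test below: VF = vscale x 4, UF = 1, MinProfitableTripCount = 28.
The helper name min_iters_threshold and its parameters are illustrative only;
the real change builds IR via createStepForVF and
Builder.CreateBinaryIntrinsic(Intrinsic::umax, ...).

    // Illustrative, standalone model of the minimum-iterations threshold.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Threshold the scalar trip count must reach before taking the vector loop.
    uint64_t min_iters_threshold(uint64_t vf_known_min, uint64_t uf,
                                 uint64_t min_prof_tc, bool scalable,
                                 uint64_t vscale /* runtime value */) {
      uint64_t step = vf_known_min * uf * (scalable ? vscale : 1);
      if (vf_known_min * uf >= min_prof_tc)
        return step;                       // VF * UF already covers MinProfTC.
      if (!scalable)
        return min_prof_tc;                // Fixed VF: the constant suffices.
      return std::max<uint64_t>(min_prof_tc, step); // Scalable VF: runtime umax.
    }

    int main() {
      // VF = vscale x 4, UF = 1, MinProfitableTripCount = 28 (as in the test).
      std::cout << min_iters_threshold(4, 1, 28, true, 2) << "\n";  // 28
      std::cout << min_iters_threshold(4, 1, 28, true, 16) << "\n"; // 64
    }

With vscale = 16 the real VF * UF is 64, so comparing the trip count only
against the constant 28 would let loops with 28..63 iterations reach a vector
loop that assumes at least 64 iterations; the runtime umax guards against that.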

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
    llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index d897b247a41e3..b5a96de1afe36 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2947,11 +2947,17 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
   // If tail is to be folded, vector loop takes care of all iterations.
   Type *CountTy = Count->getType();
   Value *CheckMinIters = Builder.getFalse();
-  auto CreateStep = [&]() {
+  auto CreateStep = [&]() -> Value * {
     // Create step with max(MinProTripCount, UF * VF).
-    if (UF * VF.getKnownMinValue() < MinProfitableTripCount.getKnownMinValue())
-      return createStepForVF(Builder, CountTy, MinProfitableTripCount, 1);
-    return createStepForVF(Builder, CountTy, VF, UF);
+    if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue())
+      return createStepForVF(Builder, CountTy, VF, UF);
+
+    Value *MinProfTC =
+        createStepForVF(Builder, CountTy, MinProfitableTripCount, 1);
+    if (!VF.isScalable())
+      return MinProfTC;
+    return Builder.CreateBinaryIntrinsic(
+        Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF));
   };
 
   if (!Cost->foldTailByMasking())

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
index bb573f75723fd..401a44045d164 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-runtime-check-size-based-threshold.ll
@@ -14,92 +14,95 @@ define void @min_trip_count_due_to_runtime_checks_1(ptr %dst.1, ptr %dst.2, ptr
 ; CHECK-NEXT:    [[DST_12:%.*]] = ptrtoint ptr [[DST_1:%.*]] to i64
 ; CHECK-NEXT:    [[DST_21:%.*]] = ptrtoint ptr [[DST_2:%.*]] to i64
 ; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 1)
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 28
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 28, i64 [[TMP1]])
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], [[TMP2]]
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK:       vector.memcheck:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 16
-; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[DST_21]], [[DST_12]]
-; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 16
-; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[DST_12]], [[SRC_13]]
-; CHECK-NEXT:    [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 2
+; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 16
+; CHECK-NEXT:    [[TMP6:%.*]] = sub i64 [[DST_21]], [[DST_12]]
+; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP7]], 2
+; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 16
+; CHECK-NEXT:    [[TMP10:%.*]] = sub i64 [[DST_12]], [[SRC_13]]
+; CHECK-NEXT:    [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP10]], [[TMP9]]
 ; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
-; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 2
-; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 16
-; CHECK-NEXT:    [[TMP11:%.*]] = sub i64 [[DST_12]], [[SRC_25]]
-; CHECK-NEXT:    [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP11]], [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i64 [[TMP11]], 2
+; CHECK-NEXT:    [[TMP13:%.*]] = mul i64 [[TMP12]], 16
+; CHECK-NEXT:    [[TMP14:%.*]] = sub i64 [[DST_12]], [[SRC_25]]
+; CHECK-NEXT:    [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
 ; CHECK-NEXT:    [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK6]]
-; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP13:%.*]] = mul i64 [[TMP12]], 2
-; CHECK-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], 16
-; CHECK-NEXT:    [[TMP15:%.*]] = sub i64 [[DST_21]], [[SRC_13]]
-; CHECK-NEXT:    [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP15]], [[TMP14]]
+; CHECK-NEXT:    [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP15]], 2
+; CHECK-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP16]], 16
+; CHECK-NEXT:    [[TMP18:%.*]] = sub i64 [[DST_21]], [[SRC_13]]
+; CHECK-NEXT:    [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP18]], [[TMP17]]
 ; CHECK-NEXT:    [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
-; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP16]], 2
-; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP17]], 16
-; CHECK-NEXT:    [[TMP19:%.*]] = sub i64 [[DST_21]], [[SRC_25]]
-; CHECK-NEXT:    [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP19]], [[TMP18]]
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP19]], 2
+; CHECK-NEXT:    [[TMP21:%.*]] = mul i64 [[TMP20]], 16
+; CHECK-NEXT:    [[TMP22:%.*]] = sub i64 [[DST_21]], [[SRC_25]]
+; CHECK-NEXT:    [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP22]], [[TMP21]]
 ; CHECK-NEXT:    [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX9]], [[DIFF_CHECK10]]
 ; CHECK-NEXT:    br i1 [[CONFLICT_RDX11]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
-; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP21:%.*]] = mul i64 [[TMP20]], 4
-; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP24:%.*]] = mul i64 [[TMP23]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], [[TMP24]]
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TMP22:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP24:%.*]] = mul i64 [[TMP23]], 2
-; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[TMP24]], 0
-; CHECK-NEXT:    [[TMP26:%.*]] = mul i64 [[TMP25]], 1
-; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[INDEX]], [[TMP26]]
-; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr i64, ptr [[SRC_1]], i64 [[TMP22]]
-; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr i64, ptr [[SRC_1]], i64 [[TMP27]]
-; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[TMP22]]
-; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[TMP27]]
-; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr i64, ptr [[TMP28]], i32 0
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP32]], align 4
-; CHECK-NEXT:    [[TMP33:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[TMP34:%.*]] = mul i32 [[TMP33]], 2
-; CHECK-NEXT:    [[TMP35:%.*]] = getelementptr i64, ptr [[TMP28]], i32 [[TMP34]]
-; CHECK-NEXT:    [[WIDE_LOAD12:%.*]] = load <vscale x 2 x i64>, ptr [[TMP35]], align 4
-; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr i64, ptr [[TMP30]], i32 0
-; CHECK-NEXT:    [[WIDE_LOAD13:%.*]] = load <vscale x 2 x i64>, ptr [[TMP36]], align 4
-; CHECK-NEXT:    [[TMP37:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[TMP38:%.*]] = mul i32 [[TMP37]], 2
-; CHECK-NEXT:    [[TMP39:%.*]] = getelementptr i64, ptr [[TMP30]], i32 [[TMP38]]
-; CHECK-NEXT:    [[WIDE_LOAD14:%.*]] = load <vscale x 2 x i64>, ptr [[TMP39]], align 4
-; CHECK-NEXT:    [[TMP40:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD13]]
-; CHECK-NEXT:    [[TMP41:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD12]], [[WIDE_LOAD14]]
-; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr i64, ptr [[DST_1]], i64 [[TMP22]]
-; CHECK-NEXT:    [[TMP43:%.*]] = getelementptr i64, ptr [[DST_1]], i64 [[TMP27]]
-; CHECK-NEXT:    [[TMP44:%.*]] = getelementptr i64, ptr [[DST_2]], i64 [[TMP22]]
-; CHECK-NEXT:    [[TMP45:%.*]] = getelementptr i64, ptr [[DST_2]], i64 [[TMP27]]
-; CHECK-NEXT:    [[TMP46:%.*]] = getelementptr i64, ptr [[TMP42]], i32 0
-; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP40]], ptr [[TMP46]], align 4
-; CHECK-NEXT:    [[TMP47:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[TMP48:%.*]] = mul i32 [[TMP47]], 2
-; CHECK-NEXT:    [[TMP49:%.*]] = getelementptr i64, ptr [[TMP42]], i32 [[TMP48]]
-; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP41]], ptr [[TMP49]], align 4
-; CHECK-NEXT:    [[TMP50:%.*]] = getelementptr i64, ptr [[TMP44]], i32 0
-; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP40]], ptr [[TMP50]], align 4
-; CHECK-NEXT:    [[TMP51:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT:    [[TMP52:%.*]] = mul i32 [[TMP51]], 2
-; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr i64, ptr [[TMP44]], i32 [[TMP52]]
-; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP41]], ptr [[TMP53]], align 4
-; CHECK-NEXT:    [[TMP54:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT:    [[TMP55:%.*]] = mul i64 [[TMP54]], 4
-; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP55]]
-; CHECK-NEXT:    [[TMP56:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP56]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP27:%.*]] = mul i64 [[TMP26]], 2
+; CHECK-NEXT:    [[TMP28:%.*]] = add i64 [[TMP27]], 0
+; CHECK-NEXT:    [[TMP29:%.*]] = mul i64 [[TMP28]], 1
+; CHECK-NEXT:    [[TMP30:%.*]] = add i64 [[INDEX]], [[TMP29]]
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr i64, ptr [[SRC_1]], i64 [[TMP25]]
+; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr i64, ptr [[SRC_1]], i64 [[TMP30]]
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[TMP25]]
+; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr i64, ptr [[SRC_2]], i64 [[TMP30]]
+; CHECK-NEXT:    [[TMP35:%.*]] = getelementptr i64, ptr [[TMP31]], i32 0
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP35]], align 4
+; CHECK-NEXT:    [[TMP36:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP37:%.*]] = mul i32 [[TMP36]], 2
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr i64, ptr [[TMP31]], i32 [[TMP37]]
+; CHECK-NEXT:    [[WIDE_LOAD12:%.*]] = load <vscale x 2 x i64>, ptr [[TMP38]], align 4
+; CHECK-NEXT:    [[TMP39:%.*]] = getelementptr i64, ptr [[TMP33]], i32 0
+; CHECK-NEXT:    [[WIDE_LOAD13:%.*]] = load <vscale x 2 x i64>, ptr [[TMP39]], align 4
+; CHECK-NEXT:    [[TMP40:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP41:%.*]] = mul i32 [[TMP40]], 2
+; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr i64, ptr [[TMP33]], i32 [[TMP41]]
+; CHECK-NEXT:    [[WIDE_LOAD14:%.*]] = load <vscale x 2 x i64>, ptr [[TMP42]], align 4
+; CHECK-NEXT:    [[TMP43:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD13]]
+; CHECK-NEXT:    [[TMP44:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD12]], [[WIDE_LOAD14]]
+; CHECK-NEXT:    [[TMP45:%.*]] = getelementptr i64, ptr [[DST_1]], i64 [[TMP25]]
+; CHECK-NEXT:    [[TMP46:%.*]] = getelementptr i64, ptr [[DST_1]], i64 [[TMP30]]
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr i64, ptr [[DST_2]], i64 [[TMP25]]
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr i64, ptr [[DST_2]], i64 [[TMP30]]
+; CHECK-NEXT:    [[TMP49:%.*]] = getelementptr i64, ptr [[TMP45]], i32 0
+; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP43]], ptr [[TMP49]], align 4
+; CHECK-NEXT:    [[TMP50:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP51:%.*]] = mul i32 [[TMP50]], 2
+; CHECK-NEXT:    [[TMP52:%.*]] = getelementptr i64, ptr [[TMP45]], i32 [[TMP51]]
+; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP44]], ptr [[TMP52]], align 4
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr i64, ptr [[TMP47]], i32 0
+; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP43]], ptr [[TMP53]], align 4
+; CHECK-NEXT:    [[TMP54:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP55:%.*]] = mul i32 [[TMP54]], 2
+; CHECK-NEXT:    [[TMP56:%.*]] = getelementptr i64, ptr [[TMP47]], i32 [[TMP55]]
+; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP44]], ptr [[TMP56]], align 4
+; CHECK-NEXT:    [[TMP57:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP58:%.*]] = mul i64 [[TMP57]], 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP58]]
+; CHECK-NEXT:    [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP59]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[UMAX]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]

