[llvm-branch-commits] [llvm] d8af310 - [LV] Add missed optimization fold-tail test
Gil Rapaport via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sat Jan 2 04:05:55 PST 2021
Author: Gil Rapaport
Date: 2021-01-02T14:00:15+02:00
New Revision: d8af31006351c9f441d73d4b6c5ea6d109f3d4f1
URL: https://github.com/llvm/llvm-project/commit/d8af31006351c9f441d73d4b6c5ea6d109f3d4f1
DIFF: https://github.com/llvm/llvm-project/commit/d8af31006351c9f441d73d4b6c5ea6d109f3d4f1.diff
LOG: [LV] Add missed optimization fold-tail test
The loop vectorizer avoids folding the tail for loops whose trip count is
known to SCEV to be divisible by the VF. In this case the assumption providing
this information is not taken into account, so the tail is needlessly folded.
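For context, a hypothetical C-level equivalent of the IR test below (a
reconstruction, not part of the commit): the assume tells the compiler that
the trip count's two low bits are zero, i.e. that it is divisible by the
forced VF of 4.

  // Hypothetical C source for the IR test below; the names A, p and the
  // constant 13 match the test. A reconstruction, not the actual source.
  void assumeAlignedTC(int *restrict A, int *p) {
    int n = *p;                      // trip count loaded from memory
    __builtin_assume((n & 3) == 0);  // n is a multiple of 4 (the VF)
    int i = 0;
    do {                             // bottom-tested, like the IR's 'loop' block
      A[i] = 13;
      i++;
    } while (i != n);
  }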
Added:
llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-assumed-divisible-TC.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-assumed-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-assumed-divisible-TC.ll
new file mode 100644
index 000000000000..e0a14db9a6d1
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-assumed-divisible-TC.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -force-vector-width=4 -S | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
+; TODO: Make sure the loop is vectorized under -Os without folding its tail based on
+; its trip-count's lower bits assumed to be zero.
+
+define dso_local void @assumeAlignedTC(i32* noalias nocapture %A, i32* %p) optsize {
+; CHECK-LABEL: @assumeAlignedTC(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[N:%.*]] = load i32, i32* [[P:%.*]], align 4
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[N]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[N]], 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i32 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK: pred.store.if:
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[TMP2]]
+; CHECK-NEXT: store i32 13, i32* [[TMP3]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
+; CHECK: pred.store.continue:
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1
+; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
+; CHECK: pred.store.if1:
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[TMP5]]
+; CHECK-NEXT: store i32 13, i32* [[TMP6]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
+; CHECK: pred.store.continue2:
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2
+; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
+; CHECK: pred.store.if3:
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[TMP8]]
+; CHECK-NEXT: store i32 13, i32* [[TMP9]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
+; CHECK: pred.store.continue4:
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3
+; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
+; CHECK: pred.store.if5:
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[TMP11]]
+; CHECK-NEXT: store i32 13, i32* [[TMP12]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
+; CHECK: pred.store.continue6:
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[RIV]]
+; CHECK-NEXT: store i32 13, i32* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], [[N]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]], [[LOOP2:!llvm.loop !.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %n = load i32, i32* %p
+ %and = and i32 %n, 3
+ %cmp = icmp eq i32 %and, 0
+ tail call void @llvm.assume(i1 %cmp)
+ br label %loop
+
+loop:
+ %riv = phi i32 [ 0, %entry ], [ %rivPlus1, %loop ]
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %riv
+ store i32 13, i32* %arrayidx, align 1
+ %rivPlus1 = add nuw nsw i32 %riv, 1
+ %cond = icmp eq i32 %rivPlus1, %n
+ br i1 %cond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+declare void @llvm.assume(i1 noundef) nofree nosync nounwind willreturn
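Note what the autogenerated checks above capture: despite the assume, the
vector preheader rounds the trip count up (N_RND_UP) and the vector body
predicates each of the four stores (the pred.store.if blocks), i.e. the tail
is currently folded into the loop via a mask. Once the TODO is addressed, the
assumption should let the vectorizer emit a plain, unpredicated vector body
with no rounding-up of the trip count.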