[llvm] 10a6fd7 - [LV] Regenerate checks for test (NFC).
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 13 04:28:50 PDT 2025
Author: Florian Hahn
Date: 2025-08-13T12:20:50+01:00
New Revision: 10a6fd70d66a0b59c1ed683c9a5dbf861beea3d5
URL: https://github.com/llvm/llvm-project/commit/10a6fd70d66a0b59c1ed683c9a5dbf861beea3d5
DIFF: https://github.com/llvm/llvm-project/commit/10a6fd70d66a0b59c1ed683c9a5dbf861beea3d5.diff
LOG: [LV] Regenerate checks for test (NFC).
Auto-generate check lines for scalable-loop-unpredicated-body-scalar-tail.ll,
while also updating the input to be more compact and avoiding unnecessary
checks, keeping the auto-generated check lines concise without loss of
generality.
Added:
Modified:
llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
index 901f228c6b67d..ee6da8f528dd2 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
@@ -1,87 +1,91 @@
-; RUN: opt -S -passes=loop-vectorize,instcombine -force-vector-interleave=1 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF1
-; RUN: opt -S -passes=loop-vectorize,instcombine -force-vector-interleave=2 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF2
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
+; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF1
+; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF2
-; CHECKUF1: for.body.preheader:
-; CHECKUF1-DAG: %wide.trip.count = zext nneg i32 %N to i64
-; CHECKUF1-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
-; CHECKUF1-DAG: %min.iters.check = icmp ugt i64 %[[VSCALEX4]], %wide.trip.count
-
-; CHECKUF1: vector.ph:
-; CHECKUF1-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF1-DAG: %[[VSCALEX4:.*]] = shl nuw i64 %[[VSCALE]], 2
-; CHECKUF1-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX4]]
-; CHECKUF1: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-
-; CHECKUF1: vector.body:
-; CHECKUF1: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECKUF1: %[[IDXB:.*]] = getelementptr inbounds double, ptr %b, i64 %index
-; CHECKUF1: %wide.load = load <vscale x 4 x double>, ptr %[[IDXB]], align 8
-; CHECKUF1: %[[FADD:.*]] = fadd <vscale x 4 x double> %wide.load, splat (double 1.000000e+00)
-; CHECKUF1: %[[IDXA:.*]] = getelementptr inbounds double, ptr %a, i64 %index
-; CHECKUF1: store <vscale x 4 x double> %[[FADD]], ptr %[[IDXA]], align 8
-; CHECKUF1: %index.next = add nuw i64 %index, %[[VSCALEX4]]
-; CHECKUF1: %[[CMP:.*]] = icmp eq i64 %index.next, %n.vec
-; CHECKUF1: br i1 %[[CMP]], label %middle.block, label %vector.body, !llvm.loop !0
-
-
-; For an interleave factor of 2, vscale is scaled by 8 instead of 4 (and thus shifted left by 3 instead of 2).
+; For an interleave factor of 2, vscale is scaled by 8 instead of 4.
; There is also the increment for the next iteration, e.g. instead of indexing IDXB, it indexes at IDXB + vscale * 4.
-
-; CHECKUF2: for.body.preheader:
-; CHECKUF2-DAG: %wide.trip.count = zext nneg i32 %N to i64
-; CHECKUF2-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
-; CHECKUF2-DAG: %min.iters.check = icmp ugt i64 %[[VSCALEX8]], %wide.trip.count
-
-; CHECKUF2: vector.ph:
-; CHECKUF2-DAG: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2-DAG: %[[VSCALEX8:.*]] = shl nuw i64 %[[VSCALE]], 3
-; CHECKUF2-DAG: %n.mod.vf = urem i64 %wide.trip.count, %[[VSCALEX8]]
-; CHECKUF2: %n.vec = sub nsw i64 %wide.trip.count, %n.mod.vf
-
-; CHECKUF2: vector.body:
-; CHECKUF2: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECKUF2: %[[IDXB:.*]] = getelementptr inbounds double, ptr %b, i64 %index
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALE2:.*]] = shl i64 %[[VSCALE]], 5
-; CHECKUF2: %[[IDXB_NEXT:.*]] = getelementptr inbounds i8, ptr %[[IDXB]], i64 %[[VSCALE2]]
-; CHECKUF2: %wide.load = load <vscale x 4 x double>, ptr %[[IDXB]], align 8
-; CHECKUF2: %wide.load{{[0-9]+}} = load <vscale x 4 x double>, ptr %[[IDXB_NEXT]], align 8
-; CHECKUF2: %[[FADD:.*]] = fadd <vscale x 4 x double> %wide.load, splat (double 1.000000e+00)
-; CHECKUF2: %[[FADD_NEXT:.*]] = fadd <vscale x 4 x double> %wide.load{{[0-9]+}}, splat (double 1.000000e+00)
-; CHECKUF2: %[[IDXA:.*]] = getelementptr inbounds double, ptr %a, i64 %index
-; CHECKUF2: %[[VSCALE:.*]] = call i64 @llvm.vscale.i64()
-; CHECKUF2: %[[VSCALE2:.*]] = shl i64 %[[VSCALE]], 5
-; CHECKUF2: %[[IDXA_NEXT:.*]] = getelementptr inbounds i8, ptr %[[IDXA]], i64 %[[VSCALE2]]
-; CHECKUF2: store <vscale x 4 x double> %[[FADD]], ptr %[[IDXA]], align 8
-; CHECKUF2: store <vscale x 4 x double> %[[FADD_NEXT]], ptr %[[IDXA_NEXT]], align 8
-; CHECKUF2: %index.next = add nuw i64 %index, %[[VSCALEX8]]
-; CHECKUF2: %[[CMP:.*]] = icmp eq i64 %index.next, %n.vec
-; CHECKUF2: br i1 %[[CMP]], label %middle.block, label %vector.body, !llvm.loop !0
-
-define void @loop(i32 %N, ptr nocapture %a, ptr nocapture readonly %b) {
+define void @loop(i64 %N, ptr noalias %a, ptr noalias %b) {
+; CHECKUF1-LABEL: define void @loop(
+; CHECKUF1-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECKUF1-NEXT: [[ENTRY:.*:]]
+; CHECKUF1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF1-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECKUF1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECKUF1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECKUF1: [[VECTOR_PH]]:
+; CHECKUF1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
+; CHECKUF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECKUF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECKUF1-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECKUF1: [[VECTOR_BODY]]:
+; CHECKUF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECKUF1-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
+; CHECKUF1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
+; CHECKUF1-NEXT: [[TMP8:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
+; CHECKUF1-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; CHECKUF1-NEXT: store <vscale x 4 x double> [[TMP8]], ptr [[TMP9]], align 8
+; CHECKUF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECKUF1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECKUF1-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECKUF1: [[MIDDLE_BLOCK]]:
+; CHECKUF1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECKUF1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECKUF1: [[SCALAR_PH]]:
+;
+; CHECKUF2-LABEL: define void @loop(
+; CHECKUF2-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECKUF2-NEXT: [[ENTRY:.*:]]
+; CHECKUF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECKUF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECKUF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECKUF2: [[VECTOR_PH]]:
+; CHECKUF2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
+; CHECKUF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; CHECKUF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECKUF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECKUF2: [[VECTOR_BODY]]:
+; CHECKUF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECKUF2-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
+; CHECKUF2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP8]], 4
+; CHECKUF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP7]], i64 [[TMP16]]
+; CHECKUF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
+; CHECKUF2-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x double>, ptr [[TMP9]], align 8
+; CHECKUF2-NEXT: [[TMP10:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
+; CHECKUF2-NEXT: [[TMP11:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD3]], splat (double 1.000000e+00)
+; CHECKUF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; CHECKUF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; CHECKUF2-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP13]], 4
+; CHECKUF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[TMP12]], i64 [[TMP17]]
+; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP10]], ptr [[TMP12]], align 8
+; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP11]], ptr [[TMP14]], align 8
+; CHECKUF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECKUF2-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECKUF2-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECKUF2: [[MIDDLE_BLOCK]]:
+; CHECKUF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECKUF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECKUF2: [[SCALAR_PH]]:
+;
entry:
- %cmp7 = icmp sgt i32 %N, 0
- br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
-
-for.body.preheader: ; preds = %entry
- %wide.trip.count = zext i32 %N to i64
- br label %for.body
-
-for.cond.cleanup: ; preds = %for.body, %entry
- ret void
+ br label %loop
-for.body: ; preds = %for.body.preheader, %for.body
- %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %b, i64 %indvars.iv
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %arrayidx = getelementptr inbounds double, ptr %b, i64 %iv
%0 = load double, ptr %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %iv
store double %add, ptr %arrayidx2, align 8
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
- br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, %N
+ br i1 %ec, label %exit, label %loop, !llvm.loop !1
+
+exit:
+ ret void
}
!1 = distinct !{!1, !2}
More information about the llvm-commits
mailing list