[llvm-branch-commits] [llvm] [NFC][VPlan] Add initial tests for future VPlan-based stride MV (PR #182594)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Feb 20 13:21:30 PST 2026
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-transforms
Author: Andrei Elovikov (eas)
<details>
<summary>Changes</summary>
I tried to include both the features that the current
LoopAccessAnalysis-based transformation supports (e.g., trunc/sext of the stride) and cases where the current implementation behaves poorly, e.g., https://godbolt.org/z/h31c3zKxK; as well as some other potentially interesting scenarios I could imagine.
Stacked on top of https://github.com/llvm/llvm-project/pull/182593.
---
Patch is 201.47 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/182594.diff
2 Files Affected:
- (added) llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll (+2282)
- (added) llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll (+2027)
``````````diff
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll
new file mode 100644
index 0000000000000..33eac6bc08b26
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll
@@ -0,0 +1,2282 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -disable-output \
+; RUN: -vplan-print-after=scalarizeMemOpsWithIrregularTypes \
+; RUN: -enable-mem-access-versioning=false 2>&1 | FileCheck %s
+
+define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'basic'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride
+; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %idx = mul i64 %iv, %stride
+
+ %gep.ld = getelementptr i64, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_scaled_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<8>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 8
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8
+; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x8 = mul i64 %stride, 8
+ %idx = mul i64 %iv, %stride.x8
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_under_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_under_scaled_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<4>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 4
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8
+; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x8 = mul i64 %stride, 4
+ %idx = mul i64 %iv, %stride.x8
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_over_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_over_scaled_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<16>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 16
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8
+; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x8 = mul i64 %stride, 16
+ %idx = mul i64 %iv, %stride.x8
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_non_power_of_two_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_non_power_of_two_scaled_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<11>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 11
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8
+; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x8 = mul i64 %stride, 11
+ %idx = mul i64 %iv, %stride.x8
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_nonscaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_nonscaled_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<128> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]>
+; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1>
+; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride>
+; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx>
+; CHECK-NEXT: EMIT ir<%ld> = load ir<%gep.ld>
+; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv>
+; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>
+; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>
+; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond>
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<[[VP7:%[0-9]+]]> = phi [ ir<%iv>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<header>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<header>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<[[VP7]]> from scalar.ph)
+; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1
+; CHECK-NEXT: IR %idx = mul i64 %iv, %stride
+; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx
+; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8
+; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8
+; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %idx = mul i64 %iv, %stride
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+define void @byte_gep_negated_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; CHECK-LABEL: VPlan for loop in 'byte_gep_negated_stride'
+; CHECK: VPlan ' for UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF
+; CHECK-NEXT: Live-in vp<[[...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/182594
More information about the llvm-branch-commits
mailing list