[llvm] [NFC] Partial reduce test to demonstrate regression post commit #cc9c64d (PR #162681)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 9 08:43:08 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-transforms
Author: Sushant Gokhale (sushgokh)
<details>
<summary>Changes</summary>
We have seen a regression for Neoverse-V2 after commit #<!-- -->158641 for the mentioned case. See https://godbolt.org/z/j9Mj5WM7c. A future patch will address this.
---
Full diff: https://github.com/llvm/llvm-project/pull/162681.diff
1 File Affected:
- (added) llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-add.ll (+99)
``````````diff
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-add.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-add.ll
new file mode 100644
index 0000000000000..7b6c62b51e0a2
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-add.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^middle" --version 6
+; RUN: opt < %s -p loop-vectorize -mtriple=aarch64 -S -o - | FileCheck %s
+; RUN: opt < %s -p loop-vectorize -mtriple=aarch64 -mcpu=neoverse-v2 -S -o - | FileCheck %s --check-prefix NEOVERSE-V2
+
+define i64 @partial_reduction_with_no_second_input(ptr %arr, i64 %N)
+; CHECK-LABEL: define i64 @partial_reduction_with_no_second_input(
+; CHECK-SAME: ptr [[ARR:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[WIDE_LOAD2]] to <4 x i64>
+; CHECK-NEXT: [[TMP4]] = add <4 x i64> [[VEC_PHI]], [[TMP2]]
+; CHECK-NEXT: [[TMP5]] = add <4 x i64> [[VEC_PHI1]], [[TMP3]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+;
+; NEOVERSE-V2-LABEL: define i64 @partial_reduction_with_no_second_input(
+; NEOVERSE-V2-SAME: ptr [[ARR:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; NEOVERSE-V2-NEXT: [[ITER_CHECK:.*:]]
+; NEOVERSE-V2-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
+; NEOVERSE-V2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 2
+; NEOVERSE-V2-NEXT: br i1 [[MIN_ITERS_CHECK]], [[VEC_EPILOG_SCALAR_PH:label %.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; NEOVERSE-V2: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; NEOVERSE-V2-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[UMAX]], 8
+; NEOVERSE-V2-NEXT: br i1 [[MIN_ITERS_CHECK1]], [[VEC_EPILOG_PH:label %.*]], label %[[VECTOR_PH:.*]]
+; NEOVERSE-V2: [[VECTOR_PH]]:
+; NEOVERSE-V2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 8
+; NEOVERSE-V2-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
+; NEOVERSE-V2-NEXT: br label %[[VECTOR_BODY:.*]]
+; NEOVERSE-V2: [[VECTOR_BODY]]:
+; NEOVERSE-V2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NEOVERSE-V2-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; NEOVERSE-V2-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; NEOVERSE-V2-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP10:%.*]], %[[VECTOR_BODY]] ]
+; NEOVERSE-V2-NEXT: [[VEC_PHI4:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
+; NEOVERSE-V2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[INDEX]]
+; NEOVERSE-V2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 2
+; NEOVERSE-V2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 4
+; NEOVERSE-V2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 6
+; NEOVERSE-V2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4
+; NEOVERSE-V2-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4
+; NEOVERSE-V2-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
+; NEOVERSE-V2-NEXT: [[WIDE_LOAD7:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
+; NEOVERSE-V2-NEXT: [[TMP4:%.*]] = sext <2 x i32> [[WIDE_LOAD]] to <2 x i64>
+; NEOVERSE-V2-NEXT: [[TMP5:%.*]] = sext <2 x i32> [[WIDE_LOAD5]] to <2 x i64>
+; NEOVERSE-V2-NEXT: [[TMP6:%.*]] = sext <2 x i32> [[WIDE_LOAD6]] to <2 x i64>
+; NEOVERSE-V2-NEXT: [[TMP7:%.*]] = sext <2 x i32> [[WIDE_LOAD7]] to <2 x i64>
+; NEOVERSE-V2-NEXT: [[TMP8]] = add <2 x i64> [[VEC_PHI]], [[TMP4]]
+; NEOVERSE-V2-NEXT: [[TMP9]] = add <2 x i64> [[VEC_PHI2]], [[TMP5]]
+; NEOVERSE-V2-NEXT: [[TMP10]] = add <2 x i64> [[VEC_PHI3]], [[TMP6]]
+; NEOVERSE-V2-NEXT: [[TMP11]] = add <2 x i64> [[VEC_PHI4]], [[TMP7]]
+; NEOVERSE-V2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; NEOVERSE-V2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NEOVERSE-V2-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NEOVERSE-V2: [[MIDDLE_BLOCK]]:
+;
+{
+entry:
+ br label %loop
+
+loop:
+ %1 = phi i64 [ 0, %entry ], [ %2, %loop ]
+ %acc = phi i64 [ 0, %entry ], [ %add, %loop ]
+ %gep = getelementptr inbounds i32, ptr %arr, i64 %1
+ %load = load i32, ptr %gep
+ %sext = sext i32 %load to i64
+ %add = add i64 %acc, %sext
+ %2 = add i64 %1, 1
+ %3 = icmp ult i64 %2, %N
+ br i1 %3, label %loop, label %exit
+
+exit:
+ ret i64 %add
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+;.
+; NEOVERSE-V2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; NEOVERSE-V2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; NEOVERSE-V2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+;.
``````````
</details>
https://github.com/llvm/llvm-project/pull/162681
More information about the llvm-commits
mailing list