[llvm] [LV] Simplify the chain traversal in `getScaledReductions()` (NFCI) (PR #184830)
Benjamin Maxwell via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 10 03:49:44 PDT 2026
================
@@ -0,0 +1,146 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -force-vector-width=8 -force-vector-interleave=1 %s -S | FileCheck %s
+
+target triple = "arm64-apple-macosx"
+
+; In this test %sum1 and %sum2 share values. %sum2 is a valid partial reduction
+; chain. %sum1 is not a valid partial reduction chain as its exit value
+; `%sum1.next = add i64 %accum, 1` does not have an extended operand.
+; The sum2 chain will be matched, but rejected as some of its inputs are used
+; by sum1, which is not a partial reduction.
+define void @invalid_operation_as_exit_value(ptr %ptr, i64 %n) #0 {
+; CHECK-LABEL: define void @invalid_operation_as_exit_value(
+; CHECK-SAME: ptr [[PTR:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [2 x i8], ptr [[PTR]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
+; CHECK-NEXT: [[TMP2:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <8 x i32> [[TMP2]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i32> [[TMP3]] to <8 x i64>
+; CHECK-NEXT: [[TMP6]] = add <8 x i64> [[VEC_PHI]], [[TMP4]]
+; CHECK-NEXT: [[TMP11:%.*]] = add <8 x i64> [[VEC_PHI1]], [[TMP4]]
+; CHECK-NEXT: [[TMP7]] = add <8 x i64> [[TMP11]], splat (i64 1)
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP7]])
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP6]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i64 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[SUM1:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SUM1_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[SUM2:%.*]] = phi i64 [ [[BC_MERGE_RDX2]], %[[SCALAR_PH]] ], [ [[SUM2_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i8], ptr [[PTR]], i64 [[IV]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[GEP]], align 2
+; CHECK-NEXT: [[EXT_A:%.*]] = sext i16 [[LOAD]] to i32
+; CHECK-NEXT: [[EXT_B:%.*]] = sext i16 [[LOAD]] to i32
+; CHECK-NEXT: [[EXT_C:%.*]] = sext i16 [[LOAD]] to i32
+; CHECK-NEXT: [[MUL_1:%.*]] = mul i32 [[EXT_B]], [[EXT_C]]
+; CHECK-NEXT: [[MUL_1_EXT:%.*]] = sext i32 [[MUL_1]] to i64
+; CHECK-NEXT: [[SUM2_NEXT]] = add i64 [[SUM2]], [[MUL_1_EXT]]
+; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[EXT_A]], [[EXT_C]]
+; CHECK-NEXT: [[MUL_2_EXT:%.*]] = sext i32 [[MUL_2]] to i64
+; CHECK-NEXT: [[ACCUM:%.*]] = add i64 [[SUM1]], [[MUL_2_EXT]]
+; CHECK-NEXT: [[SUM1_NEXT]] = add i64 [[ACCUM]], 1
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SUM2_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM2_NEXT]], %[[LOOP]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[SUM1_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM1_NEXT]], %[[LOOP]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: call void @use(i64 [[SUM1_NEXT_LCSSA]], i64 [[SUM2_NEXT_LCSSA]])
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %sum1 = phi i64 [ 0, %entry ], [ %sum1.next, %loop ]
+ %sum2 = phi i64 [ 0, %entry ], [ %sum2.next, %loop ]
+ %gep = getelementptr [2 x i8], ptr %ptr, i64 %iv
+ %load = load i16, ptr %gep, align 2
+ %ext.a = sext i16 %load to i32
+ %ext.b = sext i16 %load to i32
+ %ext.c = sext i16 %load to i32
+ %mul.1 = mul i32 %ext.b, %ext.c
+ %mul.1.ext = sext i32 %mul.1 to i64
+ %sum2.next = add i64 %sum2, %mul.1.ext
+ %mul.2 = mul i32 %ext.a, %ext.c
+ %mul.2.ext = sext i32 %mul.2 to i64
+ %accum = add i64 %sum1, %mul.2.ext
+ %sum1.next = add i64 %accum, 1
+ %iv.next = add i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, %n
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ call void @use(i64 %sum1.next, i64 %sum2.next)
+ ret void
+}
+
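For contrast with the rejected %sum1 chain above, here is a minimal hypothetical sketch (not part of the patch; the function and value names are invented) of the shape the comment describes as valid: the reduction's exit value is an add whose other operand is a sign-extend, so every link of the chain from the reduction phi to the exit value has an extended operand. Whether such a chain is actually turned into a partial reduction still depends on the target and the cost model; the sketch only illustrates the structural requirement from the comment.

; Hypothetical sketch, not from the patch: a reduction chain whose exit value
; has an extended operand.
define i64 @valid_chain_sketch(ptr %ptr, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %sum = phi i64 [ 0, %entry ], [ %sum.next, %loop ]
  %gep = getelementptr i16, ptr %ptr, i64 %iv
  %load = load i16, ptr %gep, align 2
  %ext = sext i16 %load to i32
  %mul = mul i32 %ext, %ext
  %mul.ext = sext i32 %mul to i64
  ; Exit value of the reduction: an add with an extended operand (%mul.ext).
  %sum.next = add i64 %sum, %mul.ext
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret i64 %sum.next
}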
+; This test case should be rejected as `%add.const = add i32 %accum, 1` (which
+; is not the exit value) does not have an extended operand.
+define i32 @invalid_operation_after_exit_value(ptr %src) #0 {
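The body of this second test is cut off in the quoted diff. As a rough hypothetical sketch of the shape the comment describes (invented names, and i64 types where the quoted snippet uses i32): a plain constant add sits in the middle of the reduction chain, and because that link has no extended operand the whole chain should be rejected, even though the exit value itself has one.

; Hypothetical sketch, not the actual test body: a constant add in the middle
; of the reduction chain invalidates the chain.
define i64 @mid_chain_const_add_sketch(ptr %src, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr i16, ptr %src, i64 %iv
  %load = load i16, ptr %gep, align 2
  %ext = sext i16 %load to i32
  %mul = mul i32 %ext, %ext
  %mul.ext = sext i32 %mul to i64
  %accum = add i64 %red, %mul.ext           ; this link has an extended operand
  %add.const = add i64 %accum, 1            ; this link does not, so the chain is invalid
  %red.next = add i64 %add.const, %mul.ext  ; exit value, further down the chain
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret i64 %red.next
}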
----------------
MacDue wrote:
Done :+1:
https://github.com/llvm/llvm-project/pull/184830