[llvm] [LV][NFC] Regen some partial reduction tests (PR #129047)
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 27 05:05:16 PST 2025
https://github.com/david-arm created https://github.com/llvm/llvm-project/pull/129047
A few test files seemed to have been edited after using the
update_test_checks script, which can make life hard for
developers when trying to update these tests in future
patches. Also, the tests still had this comment at the top
; NOTE: Assertions have been autogenerated by ...
which could potentially be confusing.
>From 61a4f59cb50698f1b2e54d57506b6c44e199d8dc Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Thu, 27 Feb 2025 13:03:09 +0000
Subject: [PATCH] [LV][NFC] Regen some partial reduction tests
A few test files seemed to have been edited after using the
update_test_checks script, which can make life hard for
developers when trying to update these tests in future
patches. Also, the tests still had this comment at the top
; NOTE: Assertions have been autogenerated by ...
which could potentially be confusing.
---
.../AArch64/partial-reduce-chained.ll | 532 ++++++++++++++++++
.../AArch64/partial-reduce-no-dotprod.ll | 31 +
.../AArch64/partial-reduce-sub.ll | 82 +++
3 files changed, 645 insertions(+)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
index 4e4a5c82c298a..092938866c65b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
@@ -48,6 +48,32 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP15]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB:%.*]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[SUB]] = sub i32 [[ADD]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_add_sub(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -94,6 +120,32 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[SUB]] = sub i32 [[ADD]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_add_sub(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -141,6 +193,32 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB]] = sub i32 [[ADD]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -213,6 +291,32 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP13]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD_2:%.*]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD_2]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[ADD_2]] = add i32 [[ADD]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_add_add(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -259,6 +363,32 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD_2:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD_2]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[ADD_2]] = add i32 [[ADD]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_add_add(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -305,6 +435,32 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD_2:%.*]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD_2]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD_2]] = add i32 [[ADD]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -378,6 +534,32 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP15]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[ADD]] = add i32 [[SUB]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_sub_add(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -424,6 +606,32 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[ADD]] = add i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_sub_add(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -471,6 +679,32 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD]] = add i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -547,6 +781,32 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP15]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[SUB_2]] = sub i32 [[SUB]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_sub_sub(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -593,6 +853,32 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[SUB_2]] = sub i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_sub_sub(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -641,6 +927,32 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB_2]] = sub i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -718,6 +1030,34 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[SUB:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[SUB_2]] = add i32 [[ADD]], [[MUL_BC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]], !loop [[META4]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_add_add_add(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -766,6 +1106,34 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP23]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[SUB:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[SUB_2]] = add i32 [[ADD]], [[MUL_BC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]], !loop [[META4]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_add_add_add(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -814,6 +1182,34 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE4]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP20]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB:%.*]] = add nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB_2]] = add i32 [[ADD]], [[MUL_BC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]], !loop [[META4]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -895,6 +1291,34 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]])
; CHECK-NEON-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-NEON: scalar.ph:
+; CHECK-NEON-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEON-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEON-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-NEON: for.cond.cleanup:
+; CHECK-NEON-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEON-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-NEON: for.body:
+; CHECK-NEON-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-NEON-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEON-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-NEON-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-NEON-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-NEON-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-NEON-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-NEON-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-NEON-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-NEON-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-NEON-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-NEON-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-NEON-NEXT: [[SUB_2]] = sub i32 [[ADD]], [[MUL_BC]]
+; CHECK-NEON-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEON-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEON-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]], !loop [[META4]]
;
; CHECK-SVE-LABEL: define i32 @chained_partial_reduce_sub_add_sub(
; CHECK-SVE-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -943,6 +1367,34 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE: scalar.ph:
+; CHECK-SVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP23]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE: for.cond.cleanup:
+; CHECK-SVE-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE: for.body:
+; CHECK-SVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-SVE-NEXT: [[SUB_2]] = sub i32 [[ADD]], [[MUL_BC]]
+; CHECK-SVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]], !loop [[META4]]
;
; CHECK-SVE-MAXBW-LABEL: define i32 @chained_partial_reduce_sub_add_sub(
; CHECK-SVE-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -993,6 +1445,34 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE4]])
; CHECK-SVE-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK-SVE-MAXBW: scalar.ph:
+; CHECK-SVE-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-SVE-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP23]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-SVE-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-SVE-MAXBW: for.cond.cleanup:
+; CHECK-SVE-MAXBW-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ [[SUB_2:%.*]], [[FOR_BODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
+; CHECK-SVE-MAXBW-NEXT: ret i32 [[RES_0_LCSSA]]
+; CHECK-SVE-MAXBW: for.body:
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[RES:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUB_2]], [[FOR_BODY]] ]
+; CHECK-SVE-MAXBW-NEXT: [[A_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[B_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[C_PTR:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-SVE-MAXBW-NEXT: [[A_VAL:%.*]] = load i8, ptr [[A_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[B_VAL:%.*]] = load i8, ptr [[B_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[C_VAL:%.*]] = load i8, ptr [[C_PTR]], align 1
+; CHECK-SVE-MAXBW-NEXT: [[A_EXT:%.*]] = sext i8 [[A_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[B_EXT:%.*]] = sext i8 [[B_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[C_EXT:%.*]] = sext i8 [[C_VAL]] to i32
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AB:%.*]] = mul nsw i32 [[A_EXT]], [[B_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB:%.*]] = sub nsw i32 [[RES]], [[MUL_AB]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_AC:%.*]] = mul nsw i32 [[A_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[ADD:%.*]] = add nsw i32 [[SUB]], [[MUL_AC]]
+; CHECK-SVE-MAXBW-NEXT: [[MUL_BC:%.*]] = mul nsw i32 [[B_EXT]], [[C_EXT]]
+; CHECK-SVE-MAXBW-NEXT: [[SUB_2]] = sub i32 [[ADD]], [[MUL_BC]]
+; CHECK-SVE-MAXBW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-SVE-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-SVE-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]], !loop [[META4]]
;
entry:
%cmp28.not = icmp ult i32 %N, 2
@@ -1035,3 +1515,55 @@ attributes #0 = { vscale_range(1,16) }
!0 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!1 = distinct !{!0}
+;.
+; CHECK-NEON: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-NEON: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-NEON: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-NEON: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK-NEON: [[META4]] = distinct !{[[META5:![0-9]+]]}
+; CHECK-NEON: [[META5]] = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+; CHECK-NEON: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK-NEON: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; CHECK-NEON: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; CHECK-NEON: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
+; CHECK-NEON: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; CHECK-NEON: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
+; CHECK-NEON: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; CHECK-NEON: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
+; CHECK-NEON: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; CHECK-NEON: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
+;.
+; CHECK-SVE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-SVE: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-SVE: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-SVE: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK-SVE: [[META4]] = distinct !{[[META5:![0-9]+]]}
+; CHECK-SVE: [[META5]] = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+; CHECK-SVE: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK-SVE: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; CHECK-SVE: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; CHECK-SVE: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
+; CHECK-SVE: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; CHECK-SVE: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
+; CHECK-SVE: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; CHECK-SVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
+; CHECK-SVE: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; CHECK-SVE: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
+;.
+; CHECK-SVE-MAXBW: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-SVE-MAXBW: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-SVE-MAXBW: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-SVE-MAXBW: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK-SVE-MAXBW: [[META4]] = distinct !{[[META5:![0-9]+]]}
+; CHECK-SVE-MAXBW: [[META5]] = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+; CHECK-SVE-MAXBW: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK-SVE-MAXBW: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; CHECK-SVE-MAXBW: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; CHECK-SVE-MAXBW: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
+; CHECK-SVE-MAXBW: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; CHECK-SVE-MAXBW: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
+; CHECK-SVE-MAXBW: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; CHECK-SVE-MAXBW: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
+; CHECK-SVE-MAXBW: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; CHECK-SVE-MAXBW: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll
index 3561f52df9490..7118d86cb8c7c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-no-dotprod.ll
@@ -37,6 +37,31 @@ define i32 @not_dotp(ptr %a, ptr %b) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]]
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
+; CHECK-NEXT: br i1 false, label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 992, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP16]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1
+; CHECK-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1
+; CHECK-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]]
+; CHECK-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_EXIT]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
br label %for.body
@@ -59,3 +84,9 @@ for.body: ; preds = %for.body, %entry
for.exit: ; preds = %for.body
ret i32 %add
}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
index 37da523ed7337..476b4bbea77f0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
@@ -43,6 +43,28 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP15]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-INTERLEAVE1-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-INTERLEAVE1: for.body:
+; CHECK-INTERLEAVE1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
+; CHECK-INTERLEAVE1-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32
+; CHECK-INTERLEAVE1-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
+; CHECK-INTERLEAVE1-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32
+; CHECK-INTERLEAVE1-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]]
+; CHECK-INTERLEAVE1-NEXT: [[SUB:%.*]] = sub i32 0, [[MUL]]
+; CHECK-INTERLEAVE1-NEXT: [[ADD]] = add i32 [[ACCUM]], [[SUB]]
+; CHECK-INTERLEAVE1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-INTERLEAVE1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-INTERLEAVE1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[ADD_LCSSA]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -95,6 +117,28 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP28]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-INTERLEAVED-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK-INTERLEAVED: for.body:
+; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
+; CHECK-INTERLEAVED-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1
+; CHECK-INTERLEAVED-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32
+; CHECK-INTERLEAVED-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
+; CHECK-INTERLEAVED-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1
+; CHECK-INTERLEAVED-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32
+; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]]
+; CHECK-INTERLEAVED-NEXT: [[SUB:%.*]] = sub i32 0, [[MUL]]
+; CHECK-INTERLEAVED-NEXT: [[ADD]] = add i32 [[ACCUM]], [[SUB]]
+; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP28]], [[MIDDLE_BLOCK]] ]
+; CHECK-INTERLEAVED-NEXT: ret i32 [[ADD_LCSSA]]
;
; CHECK-MAXBW-LABEL: define i32 @dotp(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -132,6 +176,28 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP17]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ]
+; CHECK-MAXBW-NEXT: br label [[FOR_BODY1:%.*]]
+; CHECK-MAXBW: for.body:
+; CHECK-MAXBW-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ]
+; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV1]]
+; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1
+; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32
+; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV1]]
+; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1
+; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32
+; CHECK-MAXBW-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]]
+; CHECK-MAXBW-NEXT: [[SUB:%.*]] = sub i32 0, [[MUL]]
+; CHECK-MAXBW-NEXT: [[ADD]] = add i32 [[ACCUM]], [[SUB]]
+; CHECK-MAXBW-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
+; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1024
+; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY1]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-MAXBW-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
br label %for.body
@@ -161,3 +227,19 @@ for.exit: ; preds = %for.body
!9 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!10 = !{!"llvm.loop.vectorize.enable", i1 true}
attributes #0 = { vscale_range(1,16) "target-features"="+sve" }
+;.
+; CHECK-INTERLEAVE1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-INTERLEAVE1: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-INTERLEAVE1: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-INTERLEAVE1: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
+; CHECK-INTERLEAVED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-INTERLEAVED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-INTERLEAVED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-INTERLEAVED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
+; CHECK-MAXBW: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-MAXBW: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-MAXBW: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-MAXBW: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
More information about the llvm-commits
mailing list