[llvm] 5db774a - [LV] Add additional test for narrowing to single scalars.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 12 02:29:13 PDT 2025
Author: Florian Hahn
Date: 2025-10-12T10:27:20+01:00
New Revision: 5db774a8224fd71418877fa4b90349381cb257d5
URL: https://github.com/llvm/llvm-project/commit/5db774a8224fd71418877fa4b90349381cb257d5
DIFF: https://github.com/llvm/llvm-project/commit/5db774a8224fd71418877fa4b90349381cb257d5.diff
LOG: [LV] Add additional test for narrowing to single scalars.
Add extra test coverage for narrowing stores to single scalars, with the
store address being uniform-per-part, not uniform-across-all-parts.
Test for https://github.com/llvm/llvm-project/issues/162498.
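
For context, a rough C equivalent of the loop in the new test (a sketch, not part of the commit; the function name is made up): with VF=2 starting at an even index, both lanes of a part share the same i >> 1, so the store address is uniform per part, while the two parts of an interleaved (IC=2) iteration store to different addresses.

  // Rough C equivalent of the new test's scalar loop (sketch only).
  // For VF=2, both lanes of a part share i >> 1 (e.g. i = 2, 3 -> dst[1]),
  // so the store address is uniform per part but differs between parts.
  void store_iv_shifted(int *dst) {
    for (int i = 0; i < 100; i++)
      dst[i >> 1] = i;
  }
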
Added:
Modified:
llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
index cb16032580136..1533906247739 100644
--- a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
@@ -1,46 +1,59 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -p loop-vectorize -force-vector-width=4 -S %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -force-vector-width=4 -S %s | FileCheck --check-prefix=VF4IC1 %s
+; RUN: opt -p loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S %s | FileCheck --check-prefix=VF2IC2 %s
define void @narrow_select_to_single_scalar(i1 %invar.cond, ptr noalias %A, ptr noalias %B, ptr noalias %C) {
-; CHECK-LABEL: define void @narrow_select_to_single_scalar(
-; CHECK-SAME: i1 [[INVAR_COND:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[INVAR_COND]], i16 0, i16 1
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[C]], i16 [[TMP0]]
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i16
-; CHECK-NEXT: [[TMP2:%.*]] = add i16 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = add i16 [[OFFSET_IDX]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i16 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP5:%.*]] = add i16 [[OFFSET_IDX]], 3
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i16 [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[TMP6]], align 1
-; CHECK-NEXT: store i16 [[TMP7]], ptr [[B]], align 1
-; CHECK-NEXT: store i16 0, ptr [[TMP1]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
-; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 1024, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_HEADER]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i16 [[IV]]
-; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 1
-; CHECK-NEXT: store i16 [[L_0]], ptr [[B]], align 1
-; CHECK-NEXT: [[INVAR_SEL:%.*]] = select i1 [[INVAR_COND]], i16 0, i16 1
-; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr i16, ptr [[C]], i16 [[INVAR_SEL]]
-; CHECK-NEXT: store i16 0, ptr [[GEP_C]], align 1
-; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp ne i16 [[IV]], 1024
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: [[EXIT]]:
-; CHECK-NEXT: ret void
+; VF4IC1-LABEL: define void @narrow_select_to_single_scalar(
+; VF4IC1-SAME: i1 [[INVAR_COND:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) {
+; VF4IC1-NEXT: [[ENTRY:.*:]]
+; VF4IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF4IC1: [[VECTOR_PH]]:
+; VF4IC1-NEXT: [[TMP0:%.*]] = select i1 [[INVAR_COND]], i16 0, i16 1
+; VF4IC1-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[C]], i16 [[TMP0]]
+; VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF4IC1: [[VECTOR_BODY]]:
+; VF4IC1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i16
+; VF4IC1-NEXT: [[TMP2:%.*]] = add i16 [[OFFSET_IDX]], 0
+; VF4IC1-NEXT: [[TMP3:%.*]] = add i16 [[OFFSET_IDX]], 1
+; VF4IC1-NEXT: [[TMP4:%.*]] = add i16 [[OFFSET_IDX]], 2
+; VF4IC1-NEXT: [[TMP5:%.*]] = add i16 [[OFFSET_IDX]], 3
+; VF4IC1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i16 [[TMP5]]
+; VF4IC1-NEXT: [[TMP7:%.*]] = load i16, ptr [[TMP6]], align 1
+; VF4IC1-NEXT: store i16 [[TMP7]], ptr [[B]], align 1
+; VF4IC1-NEXT: store i16 0, ptr [[TMP1]], align 1
+; VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF4IC1-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF4IC1-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VF4IC1: [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT: br label %[[EXIT:.*]]
+; VF4IC1: [[EXIT]]:
+; VF4IC1-NEXT: ret void
+;
+; VF2IC2-LABEL: define void @narrow_select_to_single_scalar(
+; VF2IC2-SAME: i1 [[INVAR_COND:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[TMP0:%.*]] = select i1 [[INVAR_COND]], i16 0, i16 1
+; VF2IC2-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[C]], i16 [[TMP0]]
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i16
+; VF2IC2-NEXT: [[TMP2:%.*]] = add i16 [[OFFSET_IDX]], 2
+; VF2IC2-NEXT: [[TMP3:%.*]] = add i16 [[OFFSET_IDX]], 3
+; VF2IC2-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i16 [[TMP3]]
+; VF2IC2-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP4]], align 1
+; VF2IC2-NEXT: store i16 [[TMP5]], ptr [[B]], align 1
+; VF2IC2-NEXT: store i16 0, ptr [[TMP1]], align 1
+; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF2IC2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VF2IC2: [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT: br label %[[EXIT:.*]]
+; VF2IC2: [[EXIT]]:
+; VF2IC2-NEXT: ret void
;
entry:
br label %loop.header
@@ -54,15 +67,88 @@ loop.header:
%gep.C = getelementptr i16, ptr %C, i16 %invar.sel
store i16 0, ptr %gep.C, align 1
%iv.next = add i16 %iv, 1
- %ec = icmp ne i16 %iv, 1024
+ %ec = icmp ne i16 %iv.next, 1024
br i1 %ec, label %loop.header, label %exit
exit:
ret void
}
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
+
+; FIXME: Currently this is mis-compiled when interleaving; all stores store the
+; last lane of the last part, instead of the last lane per part.
+; Test case for https://github.com/llvm/llvm-project/issues/162498.
+define void @narrow_to_single_scalar_store_address_not_uniform_across_all_parts(ptr %dst) {
+; VF4IC1-LABEL: define void @narrow_to_single_scalar_store_address_not_uniform_across_all_parts(
+; VF4IC1-SAME: ptr [[DST:%.*]]) {
+; VF4IC1-NEXT: [[ENTRY:.*:]]
+; VF4IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF4IC1: [[VECTOR_PH]]:
+; VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF4IC1: [[VECTOR_BODY]]:
+; VF4IC1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; VF4IC1-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1
+; VF4IC1-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2
+; VF4IC1-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 3
+; VF4IC1-NEXT: [[TMP4:%.*]] = lshr <4 x i32> [[VEC_IND]], splat (i32 1)
+; VF4IC1-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP4]], i32 0
+; VF4IC1-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP5]]
+; VF4IC1-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP4]], i32 1
+; VF4IC1-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP7]]
+; VF4IC1-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[TMP4]], i32 2
+; VF4IC1-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP9]]
+; VF4IC1-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP4]], i32 3
+; VF4IC1-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP11]]
+; VF4IC1-NEXT: store i32 [[TMP0]], ptr [[TMP6]], align 4
+; VF4IC1-NEXT: store i32 [[TMP1]], ptr [[TMP8]], align 4
+; VF4IC1-NEXT: store i32 [[TMP2]], ptr [[TMP10]], align 4
+; VF4IC1-NEXT: store i32 [[TMP3]], ptr [[TMP12]], align 4
+; VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF4IC1-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; VF4IC1-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
+; VF4IC1-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VF4IC1: [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT: br label %[[EXIT:.*]]
+; VF4IC1: [[EXIT]]:
+; VF4IC1-NEXT: ret void
+;
+; VF2IC2-LABEL: define void @narrow_to_single_scalar_store_address_not_uniform_across_all_parts(
+; VF2IC2-SAME: ptr [[DST:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 2
+; VF2IC2-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 3
+; VF2IC2-NEXT: [[TMP2:%.*]] = lshr i32 [[INDEX]], 1
+; VF2IC2-NEXT: [[TMP3:%.*]] = lshr i32 [[TMP0]], 1
+; VF2IC2-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP2]]
+; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[DST]], i32 [[TMP3]]
+; VF2IC2-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4
+; VF2IC2-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4
+; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF2IC2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VF2IC2: [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT: br label %[[EXIT:.*]]
+; VF2IC2: [[EXIT]]:
+; VF2IC2-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.shift = lshr i32 %iv, 1
+ %gep.dst = getelementptr i32, ptr %dst, i32 %iv.shift
+ store i32 %iv, ptr %gep.dst, align 4
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
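
To make the FIXME above concrete, here is a small C sketch (not part of the commit; the setup is illustrative) modeling the first VF=2, IC=2 vector iteration of the loop above. It contrasts the correct per-part narrowing with the miscompile where both parts store the last lane of the last part.

  #include <assert.h>

  // Sketch of the first vector iteration (index = 0) of dst[i >> 1] = i
  // after narrowing the store to a single scalar store per part (VF=2, IC=2).
  int main(void) {
    int dst[2] = {0, 0};

    // Correct narrowing: each part stores its own last lane.
    dst[0 >> 1] = 0 + 1; // part 0 covers i = 0, 1 -> dst[0] = 1
    dst[2 >> 1] = 0 + 3; // part 1 covers i = 2, 3 -> dst[1] = 3
    assert(dst[0] == 1 && dst[1] == 3); // matches the scalar loop

    // Miscompile from the FIXME: both parts store the last lane of the
    // last part (index + 3), so dst[0] becomes 3 instead of 1.
    dst[0 >> 1] = 0 + 3;
    dst[2 >> 1] = 0 + 3;
    assert(dst[0] == 3 && dst[1] == 3);
    return 0;
  }
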