[llvm] 4fdd28b - [SLP][X86] Add test coverage for #124993

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 5 00:55:54 PST 2025


Author: Simon Pilgrim
Date: 2025-02-05T08:54:09Z
New Revision: 4fdd28b7912880e5723c7c728df7a18ad82f31b6

URL: https://github.com/llvm/llvm-project/commit/4fdd28b7912880e5723c7c728df7a18ad82f31b6
DIFF: https://github.com/llvm/llvm-project/commit/4fdd28b7912880e5723c7c728df7a18ad82f31b6.diff

LOG: [SLP][X86] Add test coverage for #124993

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/scalarize-ctlz.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/scalarize-ctlz.ll b/llvm/test/Transforms/SLPVectorizer/X86/scalarize-ctlz.ll
new file mode 100644
index 00000000000000..0f9b2e9ba86fd7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scalarize-ctlz.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64 %s | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v2 %s | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v3 %s | FileCheck %s --check-prefixes=AVX2
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-- -mcpu=x86-64-v4 %s | FileCheck %s --check-prefixes=AVX512
+
+; PR124993 - ensure scalarized CTLZ calls remain scalarized unless there is a definite cost improvement, the cost of scalarization was being overestimated.
+
+define <2 x i64> @scalarize_ctlz_v2i64(<2 x i64> %v)  {
+; SSE-LABEL: define <2 x i64> @scalarize_ctlz_v2i64(
+; SSE-SAME: <2 x i64> [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; SSE-NEXT:    [[V0:%.*]] = extractelement <2 x i64> [[V]], i64 0
+; SSE-NEXT:    [[V1:%.*]] = extractelement <2 x i64> [[V]], i64 1
+; SSE-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; SSE-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; SSE-NEXT:    [[R0:%.*]] = insertelement <2 x i64> poison, i64 [[C0]], i64 0
+; SSE-NEXT:    [[R1:%.*]] = insertelement <2 x i64> [[R0]], i64 [[C1]], i64 1
+; SSE-NEXT:    ret <2 x i64> [[R1]]
+;
+; AVX2-LABEL: define <2 x i64> @scalarize_ctlz_v2i64(
+; AVX2-SAME: <2 x i64> [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; AVX2-NEXT:    [[V0:%.*]] = extractelement <2 x i64> [[V]], i64 0
+; AVX2-NEXT:    [[V1:%.*]] = extractelement <2 x i64> [[V]], i64 1
+; AVX2-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; AVX2-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; AVX2-NEXT:    [[R0:%.*]] = insertelement <2 x i64> poison, i64 [[C0]], i64 0
+; AVX2-NEXT:    [[R1:%.*]] = insertelement <2 x i64> [[R0]], i64 [[C1]], i64 1
+; AVX2-NEXT:    ret <2 x i64> [[R1]]
+;
+; AVX512-LABEL: define <2 x i64> @scalarize_ctlz_v2i64(
+; AVX512-SAME: <2 x i64> [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; AVX512-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> [[V]], i1 false)
+; AVX512-NEXT:    ret <2 x i64> [[TMP1]]
+;
+  %v0 = extractelement <2 x i64> %v, i64 0
+  %v1 = extractelement <2 x i64> %v, i64 1
+  %c0 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v0, i1 false)
+  %c1 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v1, i1 false)
+  %r0 = insertelement <2 x i64> poison, i64 %c0, i64 0
+  %r1 = insertelement <2 x i64> %r0, i64 %c1, i64 1
+  ret <2 x i64> %r1
+}
+
+define <4 x i64> @scalarize_ctlz_v4i64(<4 x i64> %v)  {
+; SSE-LABEL: define <4 x i64> @scalarize_ctlz_v4i64(
+; SSE-SAME: <4 x i64> [[V:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[V0:%.*]] = extractelement <4 x i64> [[V]], i64 0
+; SSE-NEXT:    [[V1:%.*]] = extractelement <4 x i64> [[V]], i64 1
+; SSE-NEXT:    [[V2:%.*]] = extractelement <4 x i64> [[V]], i64 2
+; SSE-NEXT:    [[V3:%.*]] = extractelement <4 x i64> [[V]], i64 3
+; SSE-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; SSE-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; SSE-NEXT:    [[C2:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V2]], i1 false)
+; SSE-NEXT:    [[C3:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V3]], i1 false)
+; SSE-NEXT:    [[R0:%.*]] = insertelement <4 x i64> poison, i64 [[C0]], i64 0
+; SSE-NEXT:    [[R1:%.*]] = insertelement <4 x i64> [[R0]], i64 [[C1]], i64 1
+; SSE-NEXT:    [[R2:%.*]] = insertelement <4 x i64> [[R1]], i64 [[C2]], i64 2
+; SSE-NEXT:    [[R3:%.*]] = insertelement <4 x i64> [[R2]], i64 [[C3]], i64 3
+; SSE-NEXT:    ret <4 x i64> [[R3]]
+;
+; AVX2-LABEL: define <4 x i64> @scalarize_ctlz_v4i64(
+; AVX2-SAME: <4 x i64> [[V:%.*]]) #[[ATTR0]] {
+; AVX2-NEXT:    [[V0:%.*]] = extractelement <4 x i64> [[V]], i64 0
+; AVX2-NEXT:    [[V1:%.*]] = extractelement <4 x i64> [[V]], i64 1
+; AVX2-NEXT:    [[V2:%.*]] = extractelement <4 x i64> [[V]], i64 2
+; AVX2-NEXT:    [[V3:%.*]] = extractelement <4 x i64> [[V]], i64 3
+; AVX2-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; AVX2-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; AVX2-NEXT:    [[C2:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V2]], i1 false)
+; AVX2-NEXT:    [[C3:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V3]], i1 false)
+; AVX2-NEXT:    [[R0:%.*]] = insertelement <4 x i64> poison, i64 [[C0]], i64 0
+; AVX2-NEXT:    [[R1:%.*]] = insertelement <4 x i64> [[R0]], i64 [[C1]], i64 1
+; AVX2-NEXT:    [[R2:%.*]] = insertelement <4 x i64> [[R1]], i64 [[C2]], i64 2
+; AVX2-NEXT:    [[R3:%.*]] = insertelement <4 x i64> [[R2]], i64 [[C3]], i64 3
+; AVX2-NEXT:    ret <4 x i64> [[R3]]
+;
+; AVX512-LABEL: define <4 x i64> @scalarize_ctlz_v4i64(
+; AVX512-SAME: <4 x i64> [[V:%.*]]) #[[ATTR0]] {
+; AVX512-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> [[V]], i1 false)
+; AVX512-NEXT:    ret <4 x i64> [[TMP1]]
+;
+  %v0 = extractelement <4 x i64> %v, i64 0
+  %v1 = extractelement <4 x i64> %v, i64 1
+  %v2 = extractelement <4 x i64> %v, i64 2
+  %v3 = extractelement <4 x i64> %v, i64 3
+  %c0 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v0, i1 false)
+  %c1 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v1, i1 false)
+  %c2 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v2, i1 false)
+  %c3 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v3, i1 false)
+  %r0 = insertelement <4 x i64> poison, i64 %c0, i64 0
+  %r1 = insertelement <4 x i64> %r0, i64 %c1, i64 1
+  %r2 = insertelement <4 x i64> %r1, i64 %c2, i64 2
+  %r3 = insertelement <4 x i64> %r2, i64 %c3, i64 3
+  ret <4 x i64> %r3
+}
+
+define <8 x i64> @scalarize_ctlz_v8i64(<8 x i64> %v)  {
+; SSE2-LABEL: define <8 x i64> @scalarize_ctlz_v8i64(
+; SSE2-SAME: <8 x i64> [[V:%.*]]) #[[ATTR0]] {
+; SSE2-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i64> [[V]], <8 x i64> poison, <2 x i32> <i32 0, i32 1>
+; SSE2-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> [[TMP1]], i1 false)
+; SSE2-NEXT:    [[TMP3:%.*]] = shufflevector <8 x i64> [[V]], <8 x i64> poison, <2 x i32> <i32 2, i32 3>
+; SSE2-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> [[TMP3]], i1 false)
+; SSE2-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i64> [[V]], <8 x i64> poison, <2 x i32> <i32 4, i32 5>
+; SSE2-NEXT:    [[TMP6:%.*]] = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> [[TMP5]], i1 false)
+; SSE2-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i64> [[V]], <8 x i64> poison, <2 x i32> <i32 6, i32 7>
+; SSE2-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> [[TMP7]], i1 false)
+; SSE2-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT:    [[R31:%.*]] = shufflevector <8 x i64> [[TMP9]], <8 x i64> [[TMP10]], <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 6, i32 7>
+; SSE2-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP6]], <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT:    [[R52:%.*]] = shufflevector <8 x i64> [[R31]], <8 x i64> [[TMP11]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 6, i32 7>
+; SSE2-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP8]], <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; SSE2-NEXT:    [[R73:%.*]] = shufflevector <8 x i64> [[R52]], <8 x i64> [[TMP12]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+; SSE2-NEXT:    ret <8 x i64> [[R73]]
+;
+; SSE4-LABEL: define <8 x i64> @scalarize_ctlz_v8i64(
+; SSE4-SAME: <8 x i64> [[V:%.*]]) #[[ATTR0]] {
+; SSE4-NEXT:    [[V0:%.*]] = extractelement <8 x i64> [[V]], i64 0
+; SSE4-NEXT:    [[V1:%.*]] = extractelement <8 x i64> [[V]], i64 1
+; SSE4-NEXT:    [[V2:%.*]] = extractelement <8 x i64> [[V]], i64 2
+; SSE4-NEXT:    [[V3:%.*]] = extractelement <8 x i64> [[V]], i64 3
+; SSE4-NEXT:    [[V4:%.*]] = extractelement <8 x i64> [[V]], i64 4
+; SSE4-NEXT:    [[V5:%.*]] = extractelement <8 x i64> [[V]], i64 5
+; SSE4-NEXT:    [[V6:%.*]] = extractelement <8 x i64> [[V]], i64 6
+; SSE4-NEXT:    [[V7:%.*]] = extractelement <8 x i64> [[V]], i64 7
+; SSE4-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; SSE4-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; SSE4-NEXT:    [[C2:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V2]], i1 false)
+; SSE4-NEXT:    [[C3:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V3]], i1 false)
+; SSE4-NEXT:    [[C4:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V4]], i1 false)
+; SSE4-NEXT:    [[C5:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V5]], i1 false)
+; SSE4-NEXT:    [[C6:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V6]], i1 false)
+; SSE4-NEXT:    [[C7:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V7]], i1 false)
+; SSE4-NEXT:    [[R0:%.*]] = insertelement <8 x i64> poison, i64 [[C0]], i64 0
+; SSE4-NEXT:    [[R1:%.*]] = insertelement <8 x i64> [[R0]], i64 [[C1]], i64 1
+; SSE4-NEXT:    [[R2:%.*]] = insertelement <8 x i64> [[R1]], i64 [[C2]], i64 2
+; SSE4-NEXT:    [[R3:%.*]] = insertelement <8 x i64> [[R2]], i64 [[C3]], i64 3
+; SSE4-NEXT:    [[R4:%.*]] = insertelement <8 x i64> [[R3]], i64 [[C4]], i64 4
+; SSE4-NEXT:    [[R5:%.*]] = insertelement <8 x i64> [[R4]], i64 [[C5]], i64 5
+; SSE4-NEXT:    [[R6:%.*]] = insertelement <8 x i64> [[R5]], i64 [[C6]], i64 6
+; SSE4-NEXT:    [[R7:%.*]] = insertelement <8 x i64> [[R6]], i64 [[C7]], i64 7
+; SSE4-NEXT:    ret <8 x i64> [[R7]]
+;
+; AVX2-LABEL: define <8 x i64> @scalarize_ctlz_v8i64(
+; AVX2-SAME: <8 x i64> [[V:%.*]]) #[[ATTR0]] {
+; AVX2-NEXT:    [[V0:%.*]] = extractelement <8 x i64> [[V]], i64 0
+; AVX2-NEXT:    [[V1:%.*]] = extractelement <8 x i64> [[V]], i64 1
+; AVX2-NEXT:    [[V2:%.*]] = extractelement <8 x i64> [[V]], i64 2
+; AVX2-NEXT:    [[V3:%.*]] = extractelement <8 x i64> [[V]], i64 3
+; AVX2-NEXT:    [[V4:%.*]] = extractelement <8 x i64> [[V]], i64 4
+; AVX2-NEXT:    [[V5:%.*]] = extractelement <8 x i64> [[V]], i64 5
+; AVX2-NEXT:    [[V6:%.*]] = extractelement <8 x i64> [[V]], i64 6
+; AVX2-NEXT:    [[V7:%.*]] = extractelement <8 x i64> [[V]], i64 7
+; AVX2-NEXT:    [[C0:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V0]], i1 false)
+; AVX2-NEXT:    [[C1:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V1]], i1 false)
+; AVX2-NEXT:    [[C2:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V2]], i1 false)
+; AVX2-NEXT:    [[C3:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V3]], i1 false)
+; AVX2-NEXT:    [[C4:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V4]], i1 false)
+; AVX2-NEXT:    [[C5:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V5]], i1 false)
+; AVX2-NEXT:    [[C6:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V6]], i1 false)
+; AVX2-NEXT:    [[C7:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[V7]], i1 false)
+; AVX2-NEXT:    [[R0:%.*]] = insertelement <8 x i64> poison, i64 [[C0]], i64 0
+; AVX2-NEXT:    [[R1:%.*]] = insertelement <8 x i64> [[R0]], i64 [[C1]], i64 1
+; AVX2-NEXT:    [[R2:%.*]] = insertelement <8 x i64> [[R1]], i64 [[C2]], i64 2
+; AVX2-NEXT:    [[R3:%.*]] = insertelement <8 x i64> [[R2]], i64 [[C3]], i64 3
+; AVX2-NEXT:    [[R4:%.*]] = insertelement <8 x i64> [[R3]], i64 [[C4]], i64 4
+; AVX2-NEXT:    [[R5:%.*]] = insertelement <8 x i64> [[R4]], i64 [[C5]], i64 5
+; AVX2-NEXT:    [[R6:%.*]] = insertelement <8 x i64> [[R5]], i64 [[C6]], i64 6
+; AVX2-NEXT:    [[R7:%.*]] = insertelement <8 x i64> [[R6]], i64 [[C7]], i64 7
+; AVX2-NEXT:    ret <8 x i64> [[R7]]
+;
+; AVX512-LABEL: define <8 x i64> @scalarize_ctlz_v8i64(
+; AVX512-SAME: <8 x i64> [[V:%.*]]) #[[ATTR0]] {
+; AVX512-NEXT:    [[TMP1:%.*]] = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> [[V]], i1 false)
+; AVX512-NEXT:    ret <8 x i64> [[TMP1]]
+;
+  %v0 = extractelement <8 x i64> %v, i64 0
+  %v1 = extractelement <8 x i64> %v, i64 1
+  %v2 = extractelement <8 x i64> %v, i64 2
+  %v3 = extractelement <8 x i64> %v, i64 3
+  %v4 = extractelement <8 x i64> %v, i64 4
+  %v5 = extractelement <8 x i64> %v, i64 5
+  %v6 = extractelement <8 x i64> %v, i64 6
+  %v7 = extractelement <8 x i64> %v, i64 7
+  %c0 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v0, i1 false)
+  %c1 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v1, i1 false)
+  %c2 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v2, i1 false)
+  %c3 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v3, i1 false)
+  %c4 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v4, i1 false)
+  %c5 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v5, i1 false)
+  %c6 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v6, i1 false)
+  %c7 = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %v7, i1 false)
+  %r0 = insertelement <8 x i64> poison, i64 %c0, i64 0
+  %r1 = insertelement <8 x i64> %r0, i64 %c1, i64 1
+  %r2 = insertelement <8 x i64> %r1, i64 %c2, i64 2
+  %r3 = insertelement <8 x i64> %r2, i64 %c3, i64 3
+  %r4 = insertelement <8 x i64> %r3, i64 %c4, i64 4
+  %r5 = insertelement <8 x i64> %r4, i64 %c5, i64 5
+  %r6 = insertelement <8 x i64> %r5, i64 %c6, i64 6
+  %r7 = insertelement <8 x i64> %r6, i64 %c7, i64 7
+  ret <8 x i64> %r7
+}


        


More information about the llvm-commits mailing list