[clang] 38896d6 - [Headers][X86] Add constexpr support for some AVX512 masked extension/truncation intrinsics. (#161984)
via cfe-commits
cfe-commits at lists.llvm.org
Mon Oct 6 01:28:10 PDT 2025
Author: sskzakaria
Date: 2025-10-06T08:28:06Z
New Revision: 38896d67e458cf4d3b5ce0c3742f48e97527c797
URL: https://github.com/llvm/llvm-project/commit/38896d67e458cf4d3b5ce0c3742f48e97527c797
DIFF: https://github.com/llvm/llvm-project/commit/38896d67e458cf4d3b5ce0c3742f48e97527c797.diff
LOG: [Headers][X86] Add constexpr support for some AVX512 masked extension/truncation intrinsics. (#161984)
The following AVX[512] intrinsics are now constexpr:
* _mm_cvtepi32_epi8
* _mm_cvtepi32_epi16
* _mm_cvtepi64_epi8
* _mm_cvtepi64_epi16
* _mm_cvtepi64_epi32
* _mm256_cvtepi32_epi8
* _mm256_cvtepi32_epi16
* _mm256_cvtepi64_epi8
* _mm256_cvtepi64_epi16
* _mm256_cvtepi64_epi32
* _mm256_mask_cvtepi64_epi32
* _mm256_maskz_cvtepi64_epi32
Fixes #154539
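
A minimal usage sketch (not part of this commit): assuming a clang build that carries these changes, compiled as C++ with -mavx512f -mavx512vl, the unmasked truncations listed above can now be folded in constant expressions, with the upper destination lanes zero-filled exactly as the header implementation below spells out. Values and variable names here are illustrative only.

#include <immintrin.h>

// Illustrative only; constants are not from the commit.
// _mm_cvtepi32_epi16 truncates each 32-bit lane to 16 bits and zero-fills
// the upper half of the 128-bit result.
constexpr __v8hi kTrunc =
    (__v8hi)_mm_cvtepi32_epi16((__m128i)(__v4si){1, 2, 3, 4});
static_assert(kTrunc[0] == 1 && kTrunc[3] == 4, "low lanes hold the truncated values");
static_assert(kTrunc[4] == 0 && kTrunc[7] == 0, "upper lanes are zero-filled");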
Added:
Modified:
clang/lib/Headers/avx512vlintrin.h
clang/test/CodeGen/X86/avx512vl-builtins.c
Removed:
################################################################################
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 754f43ad88543..965741f0ff944 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -7330,9 +7330,8 @@ _mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi8 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_cvtepi32_epi8(__m128i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
@@ -7360,9 +7359,8 @@ _mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi8 (__m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi32_epi8(__m256i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v8si)__A, __v8qi),
(__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
@@ -7370,8 +7368,7 @@ _mm256_cvtepi32_epi8 (__m256i __A)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
-{
+_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
(__v16qi) __O, __M);
}
@@ -7390,9 +7387,8 @@ _mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi32_epi16 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_cvtepi32_epi16(__m128i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
2, 3, 4, 5, 6, 7);
@@ -7419,9 +7415,8 @@ _mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi16 (__m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi32_epi16(__m256i __A) {
return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
}
@@ -7446,9 +7441,8 @@ _mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi8 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_cvtepi64_epi8(__m128i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
@@ -7475,9 +7469,8 @@ _mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi8 (__m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi64_epi8(__m256i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
@@ -7504,9 +7497,8 @@ _mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi32 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_cvtepi64_epi32(__m128i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
}
@@ -7532,23 +7524,20 @@ _mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi32 (__m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi64_epi32(__m256i __A) {
return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm256_cvtepi64_epi32(__A),
(__v4si)__O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm256_cvtepi64_epi32(__A),
(__v4si)_mm_setzero_si128());
@@ -7560,9 +7549,8 @@ _mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtepi64_epi16 (__m128i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_cvtepi64_epi16(__m128i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
3, 3, 3, 3);
@@ -7590,9 +7578,8 @@ _mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
__builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi64_epi16 (__m256i __A)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_cvtepi64_epi16(__m256i __A) {
return (__m128i)__builtin_shufflevector(
__builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
2, 3, 4, 5, 6, 7);
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 88006232c5c99..6d9187086c267 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -9272,6 +9272,8 @@ __m128i test_mm_cvtepi32_epi8(__m128i __A) {
return _mm_cvtepi32_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_cvtepi32_epi8((__m128i)(__v4si){1, 2, 3, 4}), 1 ,2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m128i test_mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.128
@@ -9297,6 +9299,8 @@ __m128i test_mm256_cvtepi32_epi8(__m256i __A) {
return _mm256_cvtepi32_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi32_epi8((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0 ,0));
+
__m128i test_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi32_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.db.256
@@ -9322,6 +9326,8 @@ __m128i test_mm_cvtepi32_epi16(__m128i __A) {
return _mm_cvtepi32_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepi32_epi16((__m128i)(__v4si){1, 2, 3, 4}), 1 ,2, 3, 4, 0, 0, 0, 0));
+
__m128i test_mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.128
@@ -9346,6 +9352,8 @@ __m128i test_mm256_cvtepi32_epi16(__m256i __A) {
return _mm256_cvtepi32_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi32_epi16((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8));
+
__m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi32_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.dw.256
@@ -9371,6 +9379,8 @@ __m128i test_mm_cvtepi64_epi8(__m128i __A) {
return _mm_cvtepi64_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm_cvtepi64_epi8((__m128i)(__v2di){1, 2}), 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m128i test_mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.128
@@ -9396,6 +9406,8 @@ __m128i test_mm256_cvtepi64_epi8(__m256i __A) {
return _mm256_cvtepi64_epi8(__A);
}
+TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi64_epi8((__m256i)(__v4di){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
__m128i test_mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi64_epi8
// CHECK: @llvm.x86.avx512.mask.pmov.qb.256
@@ -9421,6 +9433,8 @@ __m128i test_mm_cvtepi64_epi32(__m128i __A) {
return _mm_cvtepi64_epi32(__A);
}
+TEST_CONSTEXPR(match_v4si(_mm_cvtepi64_epi32((__m128i)(__v2di){1, 2}),1, 2, 0, 0));
+
__m128i test_mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_mask_cvtepi64_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.128
@@ -9445,6 +9459,8 @@ __m128i test_mm256_cvtepi64_epi32(__m256i __A) {
return _mm256_cvtepi64_epi32(__A);
}
+TEST_CONSTEXPR(match_v4si(_mm256_cvtepi64_epi32((__m256i)(__v4di){1 ,2 ,3 ,4}), 1, 2, 3, 4));
+
__m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
@@ -9452,6 +9468,8 @@ __m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
return _mm256_mask_cvtepi64_epi32(__O, __M, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm256_mask_cvtepi64_epi32(_mm_set1_epi32(-777), 0xA,(__m256i)(__v4di){1, -2, 3, -4}), -777, -2, -777, -4));
+
__m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_maskz_cvtepi64_epi32
// CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
@@ -9459,6 +9477,8 @@ __m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
return _mm256_maskz_cvtepi64_epi32(__M, __A);
}
+TEST_CONSTEXPR(match_v4si(_mm256_maskz_cvtepi64_epi32( 0xA,(__m256i)(__v4di){1, -2, 3, -4}),0 , -2, 0, -4));
+
void test_mm256_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi32
// CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.256
@@ -9472,6 +9492,8 @@ __m128i test_mm_cvtepi64_epi16(__m128i __A) {
return _mm_cvtepi64_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepi64_epi16((__m128i)(__v2di){1, 2}),1, 2, 0, 0, 0, 0, 0, 0));
+
__m128i test_mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
// CHECK-LABEL: test_mm_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.128
@@ -9497,6 +9519,8 @@ __m128i test_mm256_cvtepi64_epi16(__m256i __A) {
return _mm256_cvtepi64_epi16(__A);
}
+TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi64_epi16((__m256i)(__v4di){1 ,2, 3, 4}),1, 2, 3, 4, 0, 0, 0, 0));
+
__m128i test_mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_cvtepi64_epi16
// CHECK: @llvm.x86.avx512.mask.pmov.qw.256
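A short sketch (not part of the commit) of the masked-variant semantics exercised by the new TEST_CONSTEXPR checks above, again assuming C++ with -mavx512f -mavx512vl: lanes whose mask bit is set take the truncated value, the remaining lanes keep the pass-through operand (mask form) or become zero (maskz form). The constants are illustrative.

#include <immintrin.h>

// Mask 0xA = 0b1010: lanes 1 and 3 are selected from the truncation,
// lanes 0 and 2 keep the pass-through value (or zero for the maskz form).
constexpr __v4si kMasked = (__v4si)_mm256_mask_cvtepi64_epi32(
    _mm_set1_epi32(-777), 0xA, (__m256i)(__v4di){1, -2, 3, -4});
static_assert(kMasked[0] == -777 && kMasked[1] == -2, "");
static_assert(kMasked[2] == -777 && kMasked[3] == -4, "");

constexpr __v4si kMaskedZ = (__v4si)_mm256_maskz_cvtepi64_epi32(
    0xA, (__m256i)(__v4di){1, -2, 3, -4});
static_assert(kMaskedZ[0] == 0 && kMaskedZ[3] == -4, "");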