[clang] [Headers][X86] Update MMX arithmetic intrinsics to be used in constexpr (PR #152296)
via cfe-commits
cfe-commits at lists.llvm.org
Wed Aug 6 05:09:04 PDT 2025
github-actions[bot] wrote:
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
You can test this locally with the following command:
``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions h,c -- clang/lib/Headers/emmintrin.h clang/lib/Headers/mmintrin.h clang/test/CodeGen/X86/mmx-builtins.c
``````````
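If the diff below looks right, running the same command without `--diff` should apply the formatting in place; that is standard git-clang-format behavior, not anything specific to this PR.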
View the diff from clang-format here:
``````````diff
diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h
index 5a02a4551..b17d8b1bf 100644
--- a/clang/lib/Headers/mmintrin.h
+++ b/clang/lib/Headers/mmintrin.h
@@ -86,9 +86,8 @@ _mm_empty(void) {
/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the
/// parameter. The upper 32 bits are set to 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cvtsi32_si64(int __i)
-{
- return __extension__ (__m64)(__v2si){__i, 0};
+_mm_cvtsi32_si64(int __i) {
+ return __extension__(__m64)(__v2si){__i, 0};
}
/// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit
@@ -103,9 +102,8 @@ _mm_cvtsi32_si64(int __i)
/// \returns A 32-bit signed integer value containing the lower 32 bits of the
/// parameter.
static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cvtsi64_si32(__m64 __m)
-{
- return ((__v2si)__m)[0];
+_mm_cvtsi64_si32(__m64 __m) {
+ return ((__v2si)__m)[0];
}
/// Casts a 64-bit signed integer value into a 64-bit integer vector.
@@ -119,9 +117,8 @@ _mm_cvtsi64_si32(__m64 __m)
/// \returns A 64-bit integer vector containing the same bitwise pattern as the
/// parameter.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cvtsi64_m64(long long __i)
-{
- return __extension__ (__m64)(__v1di){__i};
+_mm_cvtsi64_m64(long long __i) {
+ return __extension__(__m64)(__v1di){__i};
}
/// Casts a 64-bit integer vector into a 64-bit signed integer value.
@@ -135,9 +132,8 @@ _mm_cvtsi64_m64(long long __i)
/// \returns A 64-bit signed integer containing the same bitwise pattern as the
/// parameter.
static __inline__ long long __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cvtm64_si64(__m64 __m)
-{
- return ((__v1di)__m)[0];
+_mm_cvtm64_si64(__m64 __m) {
+ return ((__v1di)__m)[0];
}
/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
@@ -380,9 +376,8 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_add_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2));
+_mm_add_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2));
}
/// Adds each 16-bit integer element of the first 64-bit integer vector
@@ -401,9 +396,8 @@ _mm_add_pi8(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_add_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2));
+_mm_add_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2));
}
/// Adds each 32-bit integer element of the first 64-bit integer vector
@@ -422,9 +416,8 @@ _mm_add_pi16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_add_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v2su)__m1) + ((__v2su)__m2));
+_mm_add_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v2su)__m1) + ((__v2su)__m2));
}
/// Adds, with saturation, each 8-bit signed integer element of the first
@@ -537,9 +530,8 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [8 x i8] containing the differences of
/// both parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_sub_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2));
+_mm_sub_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2));
}
/// Subtracts each 16-bit integer element of the second 64-bit integer
@@ -558,9 +550,8 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [4 x i16] containing the differences of
/// both parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_sub_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2));
+_mm_sub_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2));
}
/// Subtracts each 32-bit integer element of the second 64-bit integer
@@ -579,9 +570,8 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [2 x i32] containing the differences of
/// both parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_sub_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v2su)__m1) - ((__v2su)__m2));
+_mm_sub_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v2su)__m1) - ((__v2su)__m2));
}
/// Subtracts, with saturation, each 8-bit signed integer element of the second
@@ -746,9 +736,8 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits
/// of the products of both parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_mullo_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2));
+_mm_mullo_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2));
}
/// Left-shifts each 16-bit signed integer element of the first
@@ -1135,9 +1124,8 @@ _mm_srli_si64(__m64 __m, int __count)
/// \returns A 64-bit integer vector containing the bitwise AND of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_and_si64(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v1du)__m1) & ((__v1du)__m2));
+_mm_and_si64(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v1du)__m1) & ((__v1du)__m2));
}
/// Performs a bitwise NOT of the first 64-bit integer vector, and then
@@ -1156,9 +1144,8 @@ _mm_and_si64(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector containing the bitwise AND of the second
/// parameter and the one's complement of the first parameter.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_andnot_si64(__m64 __m1, __m64 __m2)
-{
- return (__m64)(~((__v1du)__m1) & ((__v1du)__m2));
+_mm_andnot_si64(__m64 __m1, __m64 __m2) {
+ return (__m64)(~((__v1du)__m1) & ((__v1du)__m2));
}
/// Performs a bitwise OR of two 64-bit integer vectors.
@@ -1174,9 +1161,8 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector containing the bitwise OR of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_or_si64(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v1du)__m1) | ((__v1du)__m2));
+_mm_or_si64(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v1du)__m1) | ((__v1du)__m2));
}
/// Performs a bitwise exclusive OR of two 64-bit integer vectors.
@@ -1192,9 +1178,8 @@ _mm_or_si64(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both
/// parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_xor_si64(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2));
+_mm_xor_si64(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2));
}
/// Compares the 8-bit integer elements of two 64-bit integer vectors of
@@ -1214,9 +1199,8 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2));
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2));
}
/// Compares the 16-bit integer elements of two 64-bit integer vectors of
@@ -1236,9 +1220,8 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2));
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2));
}
/// Compares the 32-bit integer elements of two 64-bit integer vectors of
@@ -1258,9 +1241,8 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)(((__v2si)__m1) == ((__v2si)__m2));
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)(((__v2si)__m1) == ((__v2si)__m2));
}
/// Compares the 8-bit integer elements of two 64-bit integer vectors of
@@ -1280,8 +1262,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [8 x i8] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
-{
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
/* This function always performs a signed comparison, but __v8qi is a char
which may be signed or unsigned, so use __v8qs. */
return (__m64)((__v8qs)__m1 > (__v8qs)__m2);
@@ -1304,9 +1285,8 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [4 x i16] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
-{
- return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
+ return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
}
/// Compares the 32-bit integer elements of two 64-bit integer vectors of
@@ -1326,9 +1306,8 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
/// \returns A 64-bit integer vector of [2 x i32] containing the comparison
/// results.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
-_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
-{
- return (__m64)((__v2si)__m1 > (__v2si)__m2);
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
+ return (__m64)((__v2si)__m1 > (__v2si)__m2);
}
/// Constructs a 64-bit integer vector initialized to zero.
``````````
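For context on what the PR itself enables: once these intrinsics are marked constexpr, MMX arithmetic can be evaluated entirely at compile time. A minimal sketch (not from the PR; assumes C++ and the updated mmintrin.h on an x86 target):

``````````cpp
#include <mmintrin.h>

// Per-byte addition with no carries between lanes, so the packed
// result is just each byte of a incremented by the matching byte of b.
constexpr __m64 a = _mm_cvtsi64_m64(0x0102030405060708LL);
constexpr __m64 b = _mm_cvtsi64_m64(0x0101010101010101LL);
constexpr __m64 sum = _mm_add_pi8(a, b);

// Evaluated at compile time; no MMX instructions are emitted.
static_assert(_mm_cvtm64_si64(sum) == 0x0203040506070809LL,
              "compile-time MMX addition");
``````````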
https://github.com/llvm/llvm-project/pull/152296