r269744 - [AVX512] Add parentheses around macro arguments in AVX512VLDQ intrinsics. Remove leading underscores from macro argument names. Add explicit typecasts to all macro arguments and return values. Finally, reformat after all the adjustments.

Craig Topper via cfe-commits cfe-commits at lists.llvm.org
Mon May 16 21:41:46 PDT 2016


Author: ctopper
Date: Mon May 16 23:41:46 2016
New Revision: 269744

URL: http://llvm.org/viewvc/llvm-project?rev=269744&view=rev
Log:
[AVX512] Add parentheses around macro arguments in AVX512VLDQ intrinsics. Remove leading underscores from macro argument names. Add explicit typecasts to all macro arguments and return values. Finally, reformat after all the adjustments.

This is a mostly mechanical change made with a script. I tried to split out any changes to pre-existing typecasts into separate commits.
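
For context, the pattern applied throughout the header is: wrap every macro argument in parentheses so that expression arguments keep their intended precedence, cast vector arguments through the public type (e.g. (__v2df)(__m128d)(A)) so that passing the wrong type is caught at the cast rather than inside the builtin, and cast the immediate with (int). A minimal usage sketch follows (not part of the commit; the helper name is hypothetical and assumes compilation with -mavx512vl -mavx512dq):

  #include <immintrin.h>

  /* With the reworked macro, the expression argument (x + y) expands as
     (__v2df)(__m128d)(x + y), so the addition is evaluated before the casts,
     and the immediate 4 is passed to the builtin as (int)(4). */
  static inline __m128d range_of_sum(__m128d x, __m128d y) {
    return _mm_range_pd(x + y, y, 4);
  }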

Modified:
    cfe/trunk/lib/Headers/avx512vldqintrin.h

Modified: cfe/trunk/lib/Headers/avx512vldqintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512vldqintrin.h?rev=269744&r1=269743&r2=269744&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avx512vldqintrin.h (original)
+++ cfe/trunk/lib/Headers/avx512vldqintrin.h Mon May 16 23:41:46 2016
@@ -852,101 +852,135 @@ _mm256_maskz_cvtepu64_ps (__mmask8 __U,
                 (__mmask8) __U);
 }
 
-#define _mm_range_pd(__A, __B, __C) __extension__ ({                         \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
-
-#define _mm_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({          \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) __W, (__mmask8) __U); })
-
-#define _mm_maskz_range_pd(__U, __A, __B, __C) __extension__ ({              \
-  (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
-
-#define _mm256_range_pd(__A, __B, __C) __extension__ ({                      \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
-
-#define _mm256_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({       \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) __W, (__mmask8) __U); })
-
-#define _mm256_maskz_range_pd(__U, __A, __B, __C) __extension__ ({           \
-  (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
-
-#define _mm_range_ps(__A, __B, __C) __extension__ ({                         \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
-
-#define _mm_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({          \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) __W, (__mmask8) __U); })
-
-#define _mm_maskz_range_ps(__U, __A, __B, __C) __extension__ ({              \
-  (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C,  \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
-
-#define _mm256_range_ps(__A, __B, __C) __extension__ ({                      \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
-
-#define _mm256_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({       \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) __W, (__mmask8) __U); })
-
-#define _mm256_maskz_range_ps(__U, __A, __B, __C) __extension__ ({           \
-  (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C,  \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
-
-#define _mm_reduce_pd(__A, __B) __extension__ ({                \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
-
-#define _mm_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) __W, (__mmask8) __U); })
-
-#define _mm_maskz_reduce_pd(__U, __A, __B) __extension__ ({     \
-  (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
-                (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
-
-#define _mm256_reduce_pd(__A, __B) __extension__ ({                \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
-
-#define _mm256_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) __W, (__mmask8) __U); })
-
-#define _mm256_maskz_reduce_pd(__U, __A, __B) __extension__ ({     \
-  (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,    \
-                (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
-
-#define _mm_reduce_ps(__A, __B) __extension__ ({                   \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
-
-#define _mm_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({    \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) __W, (__mmask8) __U); })
-
-#define _mm_maskz_reduce_ps(__U, __A, __B) __extension__ ({        \
-  (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,     \
-                (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
-
-#define _mm256_reduce_ps(__A, __B) __extension__ ({                \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
-
-#define _mm256_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) __W, (__mmask8) __U); })
-
-#define _mm256_maskz_reduce_ps(__U, __A, __B) __extension__ ({     \
-  (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,     \
-                (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+#define _mm_range_pd(A, B, C) __extension__ ({                         \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)_mm_setzero_pd(), \
+                                          (__mmask8)-1); })
+
+#define _mm_mask_range_pd(W, U, A, B, C) __extension__ ({          \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)(__m128d)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm_maskz_range_pd(U, A, B, C) __extension__ ({              \
+  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (int)(C), \
+                                          (__v2df)_mm_setzero_pd(), \
+                                          (__mmask8)(U)); })
+
+#define _mm256_range_pd(A, B, C) __extension__ ({                      \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)_mm256_setzero_pd(), \
+                                          (__mmask8)-1); })
+
+#define _mm256_mask_range_pd(W, U, A, B, C) __extension__ ({       \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)(__m256d)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm256_maskz_range_pd(U, A, B, C) __extension__ ({           \
+  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(C), \
+                                          (__v4df)_mm256_setzero_pd(), \
+                                          (__mmask8)(U)); })
+
+#define _mm_range_ps(A, B, C) __extension__ ({                         \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)_mm_setzero_ps(), \
+                                         (__mmask8)-1); })
+
+#define _mm_mask_range_ps(W, U, A, B, C) __extension__ ({          \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)(__m128)(W), (__mmask8)(U)); })
+
+#define _mm_maskz_range_ps(U, A, B, C) __extension__ ({              \
+  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (int)(C), \
+                                         (__v4sf)_mm_setzero_ps(), \
+                                         (__mmask8)(U)); })
+
+#define _mm256_range_ps(A, B, C) __extension__ ({                      \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)_mm256_setzero_ps(), \
+                                         (__mmask8)-1); })
+
+#define _mm256_mask_range_ps(W, U, A, B, C) __extension__ ({       \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)(__m256)(W), (__mmask8)(U)); })
+
+#define _mm256_maskz_range_ps(U, A, B, C) __extension__ ({           \
+  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(C), \
+                                         (__v8sf)_mm256_setzero_ps(), \
+                                         (__mmask8)(U)); })
+
+#define _mm_reduce_pd(A, B) __extension__ ({                \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1); })
+
+#define _mm_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm_maskz_reduce_pd(U, A, B) __extension__ ({     \
+  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U)); })
+
+#define _mm256_reduce_pd(A, B) __extension__ ({                \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)-1); })
+
+#define _mm256_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)(__m256d)(W), \
+                                           (__mmask8)(U)); })
+
+#define _mm256_maskz_reduce_pd(U, A, B) __extension__ ({     \
+  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)(U)); })
+
+#define _mm_reduce_ps(A, B) __extension__ ({                   \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1); })
+
+#define _mm_mask_reduce_ps(W, U, A, B) __extension__ ({    \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)(__m128)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm_maskz_reduce_ps(U, A, B) __extension__ ({        \
+  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U)); })
+
+#define _mm256_reduce_ps(A, B) __extension__ ({                \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)-1); })
+
+#define _mm256_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)(__m256)(W), \
+                                          (__mmask8)(U)); })
+
+#define _mm256_maskz_reduce_ps(U, A, B) __extension__ ({     \
+  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)(U)); })
 
 static __inline__ __mmask8 __DEFAULT_FN_ATTRS
 _mm_movepi32_mask (__m128i __A)
@@ -1116,139 +1150,115 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 _
                  __M);
 }
 
-#define _mm256_extractf64x2_pd( __A, __imm) __extension__ ({ \
-__builtin_ia32_extractf64x2_256_mask ((__v4df)( __A),\
-               ( __imm),\
-               (__v2df) _mm_setzero_pd (),\
-               (__mmask8) -1);\
-})
-
-#define _mm256_mask_extractf64x2_pd( __W, __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_extractf64x2_256_mask ((__v4df)( __A),\
-               ( __imm),\
-               (__v2df)( __W),\
-               (__mmask8) ( __U));\
-})
-
-#define _mm256_maskz_extractf64x2_pd( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_extractf64x2_256_mask ((__v4df)( __A),\
-               ( __imm),\
-               (__v2df) _mm_setzero_pd (),\
-               (__mmask8) ( __U));\
-})
-
-#define _mm256_extracti64x2_epi64( __A, __imm) __extension__ ({ \
-__builtin_ia32_extracti64x2_256_mask ((__v4di)( __A),\
-               ( __imm),\
-               (__v2di) _mm_setzero_di (),\
-               (__mmask8) -1);\
-})
-
-#define _mm256_mask_extracti64x2_epi64( __W, __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_extracti64x2_256_mask ((__v4di)( __A),\
-               ( __imm),\
-               (__v2di)( __W),\
-               (__mmask8) ( __U));\
-})
-
-#define _mm256_maskz_extracti64x2_epi64( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_extracti64x2_256_mask ((__v4di)( __A),\
-               ( __imm),\
-               (__v2di) _mm_setzero_di (),\
-               (__mmask8) ( __U));\
-})
-
-#define _mm256_insertf64x2( __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_insertf64x2_256_mask ((__v4df)( __A),\
-              (__v2df)( __B),\
-              ( __imm),\
-              (__v4df) _mm256_setzero_pd (),\
-              (__mmask8) -1);\
-})
-
-#define _mm256_mask_insertf64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_insertf64x2_256_mask ((__v4df)( __A),\
-              (__v2df)( __B),\
-              ( __imm),\
-              (__v4df)( __W),\
-              (__mmask8) ( __U));\
-})
-
-#define _mm256_maskz_insertf64x2( __U, __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_insertf64x2_256_mask ((__v4df)( __A),\
-              (__v2df)( __B),\
-              ( __imm),\
-              (__v4df) _mm256_setzero_pd (),\
-              (__mmask8) ( __U));\
-})
-
-#define _mm256_inserti64x2( __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_inserti64x2_256_mask ((__v4di)( __A),\
-              (__v2di)( __B),\
-              ( __imm),\
-              (__v4di) _mm256_setzero_si256 (),\
-              (__mmask8) -1);\
-})
-
-#define _mm256_mask_inserti64x2( __W, __U, __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_inserti64x2_256_mask ((__v4di)( __A),\
-              (__v2di)( __B),\
-              ( __imm),\
-              (__v4di)( __W),\
-              (__mmask8) ( __U));\
-})
-
-#define _mm256_maskz_inserti64x2( __U, __A, __B, __imm) __extension__ ({ \
-__builtin_ia32_inserti64x2_256_mask ((__v4di)( __A),\
-              (__v2di)( __B),\
-              ( __imm),\
-              (__v4di) _mm256_setzero_si256 (),\
-              (__mmask8) ( __U));\
-})
-
-#define _mm_mask_fpclass_pd_mask( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclasspd128_mask ((__v2df)( __A),\
-                 ( __imm),( __U));\
-})
-
-#define _mm_fpclass_pd_mask( __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclasspd128_mask ((__v2df)( __A),\
-                  ( __imm),\
-                  (__mmask8) -1);\
-})
-
-#define _mm256_mask_fpclass_pd_mask( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclasspd256_mask ((__v4df)( __A),\
-                  ( __imm),( __U));\
-})
-
-#define _mm256_fpclass_pd_mask( __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclasspd256_mask ((__v4df)( __A),\
-                  ( __imm),\
-                  (__mmask8) -1);\
-})
-
-#define _mm_mask_fpclass_ps_mask( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclassps128_mask ((__v4sf)( __A),\
-                  ( __imm),( __U));\
-})
-
-#define _mm_fpclass_ps_mask( __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclassps128_mask ((__v4sf)( __A),\
-                  ( __imm),\
-                  (__mmask8) -1);\
-})
-
-#define _mm256_mask_fpclass_ps_mask( __U, __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclassps256_mask ((__v8sf)( __A),\
-                  ( __imm),( __U));\
-})
-
-#define _mm256_fpclass_ps_mask( __A, __imm) __extension__ ({ \
-__builtin_ia32_fpclassps256_mask ((__v8sf)( __A),\
-                  ( __imm),\
-                  (__mmask8) -1);\
-})
+#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1); })
+
+#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
+  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                (int)(imm), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)-1); })
+
+#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)(__m128i)(W), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
+  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                (int)(imm), \
+                                                (__v2di)_mm_setzero_di(), \
+                                                (__mmask8)(U)); })
+
+#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)(__m256d)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+  (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)_mm256_setzero_si256(), \
+                                               (__mmask8)-1); })
+
+#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)(__m256i)(W), \
+                                               (__mmask8)(U)); })
+
+#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+  (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
+                                               (__v2di)(__m128i)(B), \
+                                               (int)(imm), \
+                                               (__v4di)_mm256_setzero_si256(), \
+                                               (__mmask8)(U)); })
+
+#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_fpclass_pd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_fpclass_pd_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm_fpclass_ps_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                             (__mmask8)-1); })
+
+#define _mm256_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__mmask8)(U)); })
+
+#define _mm256_fpclass_ps_mask(A, imm) __extension__ ({ \
+  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                             (__mmask8)-1); })
 
 #undef __DEFAULT_FN_ATTRS
 