[clang] 705b119 - [X86] Add parentheses around casts in X86 intrinsic headers.

Craig Topper via cfe-commits cfe-commits at lists.llvm.org
Sat Aug 14 18:17:10 PDT 2021


Author: Craig Topper
Date: 2021-08-14T18:14:44-07:00
New Revision: 705b1191aad3e46b72eac8c0dc965d408d6147d0

URL: https://github.com/llvm/llvm-project/commit/705b1191aad3e46b72eac8c0dc965d408d6147d0
DIFF: https://github.com/llvm/llvm-project/commit/705b1191aad3e46b72eac8c0dc965d408d6147d0.diff

LOG: [X86] Add parentheses around casts in X86 intrinsic headers.

Fixes PR51324.
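
Why the parentheses matter: a C-style cast binds less tightly than a
postfix operator, so without an outer pair of parentheses a postfix
expression written directly after one of these macros attaches to the
__builtin call instead of to the cast. A minimal sketch of the hazard,
using _mm512_bslli_epi128 from this patch (an illustrative reproducer,
not necessarily the one from the PR):

    #include <immintrin.h>

    /* Compile with -mavx512bw. Subscripting a vector is a Clang/GCC
       extension; __m512i has 64-bit lanes. */
    long long first_lane(__m512i v) {
      /* With the parenthesized macro this parses as
           ((__m512i)__builtin_ia32_pslldqi512_byteshift(...))[0]
         i.e. cast first, then extract lane 0. With the old macro it
         parsed as
           (__m512i)(__builtin_ia32_pslldqi512_byteshift(...)[0]),
         subscripting the builtin's result and then attempting to cast
         a 64-bit scalar to a 512-bit vector, which Clang rejects. */
      return _mm512_bslli_epi128(v, 1)[0];
    }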

Added: 
    

Modified: 
    clang/lib/Headers/avx512bwintrin.h
    clang/lib/Headers/avx512dqintrin.h
    clang/lib/Headers/avx512erintrin.h
    clang/lib/Headers/avx512fintrin.h
    clang/lib/Headers/avx512vbmi2intrin.h
    clang/lib/Headers/avx512vlbwintrin.h
    clang/lib/Headers/avx512vldqintrin.h
    clang/lib/Headers/avx512vlintrin.h
    clang/lib/Headers/avx512vlvbmi2intrin.h
    clang/lib/Headers/avx512vlvnniintrin.h
    clang/lib/Headers/f16cintrin.h
    clang/lib/Headers/gfniintrin.h
    clang/lib/Headers/vpclmulqdqintrin.h
    clang/lib/Headers/xopintrin.h

Removed: 
    


################################################################################
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 4281a33d375c..6aee8aed8487 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -178,16 +178,16 @@ _kadd_mask64(__mmask64 __A, __mmask64 __B)
 }
 
 #define _kshiftli_mask32(A, I) \
-  (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I))
+  ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I)))
 
 #define _kshiftri_mask32(A, I) \
-  (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I))
+  ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I)))
 
 #define _kshiftli_mask64(A, I) \
-  (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I))
+  ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I)))
 
 #define _kshiftri_mask64(A, I) \
-  (__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I))
+  ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I)))
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _cvtmask32_u32(__mmask32 __A) {
@@ -232,44 +232,44 @@ _store_mask64(__mmask64 *__A, __mmask64 __B) {
 /* Integer compare */
 
 #define _mm512_cmp_epi8_mask(a, b, p) \
-  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), (int)(p), \
-                                         (__mmask64)-1)
+  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+                                          (__v64qi)(__m512i)(b), (int)(p), \
+                                          (__mmask64)-1))
 
 #define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
-  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), (int)(p), \
-                                         (__mmask64)(m))
+  ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+                                          (__v64qi)(__m512i)(b), (int)(p), \
+                                          (__mmask64)(m)))
 
 #define _mm512_cmp_epu8_mask(a, b, p) \
-  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)-1)
+  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+                                           (__v64qi)(__m512i)(b), (int)(p), \
+                                           (__mmask64)-1))
 
 #define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
-  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)(m))
+  ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+                                           (__v64qi)(__m512i)(b), (int)(p), \
+                                           (__mmask64)(m)))
 
 #define _mm512_cmp_epi16_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), (int)(p), \
-                                         (__mmask32)-1)
+  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+                                          (__v32hi)(__m512i)(b), (int)(p), \
+                                          (__mmask32)-1))
 
 #define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), (int)(p), \
-                                         (__mmask32)(m))
+  ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+                                          (__v32hi)(__m512i)(b), (int)(p), \
+                                          (__mmask32)(m)))
 
 #define _mm512_cmp_epu16_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)-1)
+  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+                                           (__v32hi)(__m512i)(b), (int)(p), \
+                                           (__mmask32)-1))
 
 #define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)(m))
+  ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+                                           (__v32hi)(__m512i)(b), (int)(p), \
+                                           (__mmask32)(m)))
 
 #define _mm512_cmpeq_epi8_mask(A, B) \
     _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
@@ -1428,36 +1428,36 @@ _mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
 
 
 #define _mm512_shufflehi_epi16(A, imm) \
-  (__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm))
+  ((__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm)))
 
 #define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_shufflehi_epi16((A), \
+                                                                       (imm)), \
+                                       (__v32hi)(__m512i)(W)))
 
 #define _mm512_maskz_shufflehi_epi16(U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_shufflehi_epi16((A), \
+                                                                       (imm)), \
+                                       (__v32hi)_mm512_setzero_si512()))
 
 #define _mm512_shufflelo_epi16(A, imm) \
-  (__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm))
+  ((__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm)))
 
 
 #define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_shufflelo_epi16((A), \
+                                                                       (imm)), \
+                                       (__v32hi)(__m512i)(W)))
 
 
 #define _mm512_maskz_shufflelo_epi16(U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_shufflelo_epi16((A), \
+                                                                       (imm)), \
+                                       (__v32hi)_mm512_setzero_si512()))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_sllv_epi16(__m512i __A, __m512i __B)
@@ -1527,7 +1527,7 @@ _mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
 }
 
 #define _mm512_bslli_epi128(a, imm) \
-  (__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
+  ((__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srlv_epi16(__m512i __A, __m512i __B)
@@ -1664,7 +1664,7 @@ _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
 }
 
 #define _mm512_bsrli_epi128(a, imm) \
-  (__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
+  ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
@@ -1984,32 +1984,32 @@ _mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
 }
 
 #define _mm512_alignr_epi8(A, B, N) \
-  (__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
-                                     (__v64qi)(__m512i)(B), (int)(N))
+  ((__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
+                                      (__v64qi)(__m512i)(B), (int)(N)))
 
 #define _mm512_mask_alignr_epi8(W, U, A, B, N) \
-  (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                             (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                             (__v64qi)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+                              (__v64qi)(__m512i)(W)))
 
 #define _mm512_maskz_alignr_epi8(U, A, B, N) \
-  (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
                               (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)_mm512_setzero_si512())
+                              (__v64qi)(__m512i)_mm512_setzero_si512()))
 
 #define _mm512_dbsad_epu8(A, B, imm) \
-  (__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
-                                      (__v64qi)(__m512i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
+                                       (__v64qi)(__m512i)(B), (int)(imm)))
 
 #define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
                                   (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)(__m512i)(W))
+                                  (__v32hi)(__m512i)(W)))
 
 #define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
                                   (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)_mm512_setzero_si512())
+                                  (__v32hi)_mm512_setzero_si512()))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_sad_epu8 (__m512i __A, __m512i __B)

diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 337256c50f50..3ba0a0cfd5fd 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -121,10 +121,10 @@ _kadd_mask16(__mmask16 __A, __mmask16 __B)
 }
 
 #define _kshiftli_mask8(A, I) \
-  (__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I))
+  ((__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I)))
 
 #define _kshiftri_mask8(A, I) \
-  (__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I))
+  ((__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I)))
 
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _cvtmask8_u32(__mmask8 __A) {
@@ -342,19 +342,19 @@ _mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
 }
 
 #define _mm512_cvt_roundpd_epi64(A, R) \
-  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8di)_mm512_setzero_si512(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8di)(__m512i)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \
-  (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8di)_mm512_setzero_si512(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvtpd_epu64 (__m512d __A) {
@@ -381,19 +381,19 @@ _mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
 }
 
 #define _mm512_cvt_roundpd_epu64(A, R) \
-  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)(__m512i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \
-  (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvtps_epi64 (__m256 __A) {
@@ -420,19 +420,19 @@ _mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
 }
 
 #define _mm512_cvt_roundps_epi64(A, R) \
-  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
-                                           (__v8di)_mm512_setzero_si512(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
-                                           (__v8di)(__m512i)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)(__m512i)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundps_epi64(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
-                                           (__v8di)_mm512_setzero_si512(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+                                            (__v8di)_mm512_setzero_si512(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvtps_epu64 (__m256 __A) {
@@ -459,19 +459,19 @@ _mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
 }
 
 #define _mm512_cvt_roundps_epu64(A, R) \
-  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)(__m512i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundps_epu64(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -494,19 +494,19 @@ _mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
 }
 
 #define _mm512_cvt_roundepi64_pd(A, R) \
-  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \
-  (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS512
 _mm512_cvtepi64_ps (__m512i __A) {
@@ -533,19 +533,19 @@ _mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
 }
 
 #define _mm512_cvt_roundepi64_ps(A, R) \
-  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \
-  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
-                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \
-  (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -573,19 +573,19 @@ _mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
 }
 
 #define _mm512_cvtt_roundpd_epi64(A, R) \
-  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)(__m512i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \
-  (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvttpd_epu64 (__m512d __A) {
@@ -612,19 +612,19 @@ _mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
 }
 
 #define _mm512_cvtt_roundpd_epu64(A, R) \
-  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8di)_mm512_setzero_si512(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8di)_mm512_setzero_si512(), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8di)(__m512i)(W), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8di)(__m512i)(W), \
+                                              (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \
-  (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8di)_mm512_setzero_si512(), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8di)_mm512_setzero_si512(), \
+                                              (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvttps_epi64 (__m256 __A) {
@@ -651,19 +651,19 @@ _mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
 }
 
 #define _mm512_cvtt_roundps_epi64(A, R) \
-  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)(__m512i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)(__m512i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
-                                            (__v8di)_mm512_setzero_si512(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+                                             (__v8di)_mm512_setzero_si512(), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvttps_epu64 (__m256 __A) {
@@ -690,19 +690,19 @@ _mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
 }
 
 #define _mm512_cvtt_roundps_epu64(A, R) \
-  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
-                                             (__v8di)_mm512_setzero_si512(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                              (__v8di)_mm512_setzero_si512(), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
-                                             (__v8di)(__m512i)(W), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                              (__v8di)(__m512i)(W), \
+                                              (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
-                                             (__v8di)_mm512_setzero_si512(), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+                                              (__v8di)_mm512_setzero_si512(), \
+                                              (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_cvtepu64_pd (__m512i __A) {
@@ -724,20 +724,20 @@ _mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
 }
 
 #define _mm512_cvt_roundepu64_pd(A, R) \
-  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                             (__v8df)_mm512_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                             (__v8df)(__m512d)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \
-  (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+                                             (__v8df)_mm512_setzero_pd(), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS512
@@ -765,290 +765,290 @@ _mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
 }
 
 #define _mm512_cvt_roundepu64_ps(A, R) \
-  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                            (__v8sf)_mm256_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \
-  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
-                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                           (int)(R))
+  ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                            (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                            (int)(R)))
 
 #define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \
-  (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+                                            (__v8sf)_mm256_setzero_ps(), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_range_pd(A, B, C) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)-1, \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_range_pd(W, U, A, B, C) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)(__m512d)(W), (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_range_pd(U, A, B, C) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_range_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_range_round_pd(W, U, A, B, C, R) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)(__m512d)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)(__m512d)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm512_maskz_range_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
-                                          (__v8df)(__m512d)(B), (int)(C), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+                                           (__v8df)(__m512d)(B), (int)(C), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(U), (int)(R)))
 
 #define _mm512_range_ps(A, B, C) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)-1, \
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_range_ps(W, U, A, B, C) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)(__m512)(W), (__mmask16)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_range_ps(U, A, B, C) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), \
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_range_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_range_round_ps(W, U, A, B, C, R) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)(__m512)(W), (__mmask16)(U), \
-                                         (int)(R))
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)(__m512)(W), (__mmask16)(U), \
+                                          (int)(R)))
 
 #define _mm512_maskz_range_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
-                                         (__v16sf)(__m512)(B), (int)(C), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+                                          (__v16sf)(__m512)(B), (int)(C), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(U), (int)(R)))
 
 #define _mm_range_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8) -1, (int)(C),\
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8) -1, (int)(C),\
+                                                (int)(R)))
 
 #define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
 #define _mm_mask_range_round_ss(W, U, A, B, C, R) \
-  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W),\
-                                               (__mmask8)(U), (int)(C),\
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)(__m128)(W),\
+                                                (__mmask8)(U), (int)(C),\
+                                                (int)(R)))
 
 #define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_maskz_range_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(C),\
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)(U), (int)(C),\
+                                                (int)(R)))
 
 #define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
 #define _mm_range_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8) -1, (int)(C),\
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8) -1, (int)(C),\
+                                                 (int)(R)))
 
 #define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
 #define _mm_mask_range_round_sd(W, U, A, B, C, R) \
-  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W),\
-                                                (__mmask8)(U), (int)(C),\
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)(__m128d)(W),\
+                                                 (__mmask8)(U), (int)(C),\
+                                                 (int)(R)))
 
 #define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
 #define _mm_maskz_range_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(C),\
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U), (int)(C),\
+                                                 (int)(R)))
 
 #define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_reduce_pd(A, B) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)-1, \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)-1, \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_reduce_pd(W, U, A, B) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_reduce_pd(U, A, B) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_reduce_ps(A, B) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)-1, \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_reduce_ps(W, U, A, B) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_reduce_ps(U, A, B) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_reduce_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_reduce_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_reduce_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_reduce_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_reduce_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_reduce_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm_reduce_ss(A, B, C) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
-                                       (int)(C), _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+                                        (int)(C), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_reduce_ss(W, U, A, B, C) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                       (int)(C), _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(C), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_reduce_ss(U, A, B, C) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), \
-                                       (__mmask8)(U), (int)(C), \
-                                       _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)_mm_setzero_ps(), \
+                                        (__mmask8)(U), (int)(C), \
+                                        _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_reduce_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
-                                       (int)(C), (int)(R))
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+                                        (int)(C), (int)(R)))
 
 #define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                       (int)(C), (int)(R))
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                        (int)(C), (int)(R)))
 
 #define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), \
-                                       (__mmask8)(U), (int)(C), (int)(R))
+  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)_mm_setzero_ps(), \
+                                        (__mmask8)(U), (int)(C), (int)(R)))
 
 #define _mm_reduce_sd(A, B, C) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)-1, (int)(C), \
-                                        _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)_mm_setzero_pd(), \
+                                         (__mmask8)-1, (int)(C), \
+                                         _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_reduce_sd(W, U, A, B, C) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)(__m128d)(W), (__mmask8)(U), \
-                                        (int)(C), _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(C), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_reduce_sd(U, A, B, C) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)(U), (int)(C), \
-                                        _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)_mm_setzero_pd(), \
+                                         (__mmask8)(U), (int)(C), \
+                                         _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_reduce_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)-1, (int)(C), (int)(R))
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)_mm_setzero_pd(), \
+                                         (__mmask8)-1, (int)(C), (int)(R)))
 
 #define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)(__m128d)(W), (__mmask8)(U), \
-                                        (int)(C), (int)(R))
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
+                                         (int)(C), (int)(R)))
 
 #define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)(U), (int)(C), (int)(R))
+  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+                                         (__v2df)(__m128d)(B), \
+                                         (__v2df)_mm_setzero_pd(), \
+                                         (__mmask8)(U), (int)(C), (int)(R)))
 
 static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
 _mm512_movepi32_mask (__m512i __A)
@@ -1218,158 +1218,158 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
 }
 
 #define _mm512_extractf32x8_ps(A, imm) \
-  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v8sf)_mm256_undefined_ps(), \
-                                           (__mmask8)-1)
+  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                            (__v8sf)_mm256_undefined_ps(), \
+                                            (__mmask8)-1))
 
 #define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
-  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v8sf)(__m256)(W), \
-                                           (__mmask8)(U))
+  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                            (__v8sf)(__m256)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm512_maskz_extractf32x8_ps(U, A, imm) \
-  (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)(U))
+  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                            (__v8sf)_mm256_setzero_ps(), \
+                                            (__mmask8)(U)))
 
 #define _mm512_extractf64x2_pd(A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)_mm_undefined_pd(), \
-                                                (__mmask8)-1)
+  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)_mm_undefined_pd(), \
+                                                 (__mmask8)-1))
 
 #define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U))
+  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)(__m128d)(W), \
+                                                 (__mmask8)(U)))
 
 #define _mm512_maskz_extractf64x2_pd(U, A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U))
+  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U)))
 
 #define _mm512_extracti32x8_epi32(A, imm) \
-  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v8si)_mm256_undefined_si256(), \
-                                            (__mmask8)-1)
+  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v8si)_mm256_undefined_si256(), \
+                                             (__mmask8)-1))
 
 #define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
-  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U))
+  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v8si)(__m256i)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm512_maskz_extracti32x8_epi32(U, A, imm) \
-  (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U))
+  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)(U)))
 
 #define _mm512_extracti64x2_epi64(A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
                                                 (int)(imm), \
                                                 (__v2di)_mm_undefined_si128(), \
-                                                (__mmask8)-1)
+                                                (__mmask8)-1))
 
 #define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
-                                                (int)(imm), \
-                                                (__v2di)(__m128i)(W), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+                                                 (int)(imm), \
+                                                 (__v2di)(__m128i)(W), \
+                                                 (__mmask8)(U)))
 
 #define _mm512_maskz_extracti64x2_epi64(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
-                                                (int)(imm), \
-                                                (__v2di)_mm_setzero_si128(), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+                                                 (int)(imm), \
+                                                 (__v2di)_mm_setzero_si128(), \
+                                                 (__mmask8)(U)))
 
 #define _mm512_insertf32x8(A, B, imm) \
-  (__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
-                                     (__v8sf)(__m256)(B), (int)(imm))
+  ((__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
+                                      (__v8sf)(__m256)(B), (int)(imm)))
 
 #define _mm512_mask_insertf32x8(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                  (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
-                                 (__v16sf)(__m512)(W))
+                                 (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_insertf32x8(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                  (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
-                                 (__v16sf)_mm512_setzero_ps())
+                                 (__v16sf)_mm512_setzero_ps()))
 
 #define _mm512_insertf64x2(A, B, imm) \
-  (__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
-                                          (__v2df)(__m128d)(B), (int)(imm))
+  ((__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
+                                           (__v2df)(__m128d)(B), (int)(imm)))
 
 #define _mm512_mask_insertf64x2(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                   (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
-                                  (__v8df)(__m512d)(W))
+                                  (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_insertf64x2(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                   (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
-                                  (__v8df)_mm512_setzero_pd())
+                                  (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_inserti32x8(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
-                                      (__v8si)(__m256i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
+                                       (__v8si)(__m256i)(B), (int)(imm)))
 
 #define _mm512_mask_inserti32x8(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
                                  (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W))
+                                 (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_inserti32x8(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
                                  (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512())
+                                 (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_inserti64x2(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
-                                          (__v2di)(__m128i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
+                                           (__v2di)(__m128i)(B), (int)(imm)))
 
 #define _mm512_mask_inserti64x2(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
                                   (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W))
+                                  (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_inserti64x2(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
                                   (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512())
+                                  (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_mask_fpclass_ps_mask(U, A, imm) \
-  (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
-                                              (int)(imm), (__mmask16)(U))
+  ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+                                               (int)(imm), (__mmask16)(U)))
 
 #define _mm512_fpclass_ps_mask(A, imm) \
-  (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
-                                              (int)(imm), (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+                                               (int)(imm), (__mmask16)-1))
 
 #define _mm512_mask_fpclass_pd_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm512_fpclass_pd_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm_fpclass_sd_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
-                                          (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                           (__mmask8)-1))
 
 #define _mm_mask_fpclass_sd_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
-                                          (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                           (__mmask8)(U)))
 
 #define _mm_fpclass_ss_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                          (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                           (__mmask8)-1))
 
 #define _mm_mask_fpclass_ss_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                          (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                           (__mmask8)(U)))
 
 #undef __DEFAULT_FN_ATTRS512
 #undef __DEFAULT_FN_ATTRS
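
[Editor's note - not part of the patch. A minimal sketch of the
precedence hazard the new outer parentheses guard against; the type and
helper names below are illustrative stand-ins, not identifiers from the
headers:

  typedef float v4sf __attribute__((__vector_size__(16)));

  static v4sf raw(void) { v4sf r = {1.0f, 2.0f, 3.0f, 4.0f}; return r; }

  #define GET_OLD()  (v4sf)raw()    /* bare trailing cast, as before   */
  #define GET_NEW()  ((v4sf)raw())  /* the form this patch switches to */

  float use(void) {
    /* A C-style cast binds more loosely than a postfix operator, so
       GET_OLD()[2] parses as (v4sf)(raw()[2]): the GNU vector subscript
       is applied first, and the scalar result is then cast to a vector,
       which clang rejects as an invalid scalar-to-vector conversion.
       GET_NEW()[2] subscripts the cast expression as intended and
       yields 3.0f. */
    return GET_NEW()[2];
  }

The same applies to any operator at the use site that binds tighter than
a cast; wrapping the whole expansion makes each intrinsic macro behave
like a single primary expression.]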

diff  --git a/clang/lib/Headers/avx512erintrin.h b/clang/lib/Headers/avx512erintrin.h
index 857006169906..1c5a2d2d208f 100644
--- a/clang/lib/Headers/avx512erintrin.h
+++ b/clang/lib/Headers/avx512erintrin.h
@@ -15,19 +15,19 @@
 
 /* exp2a23 */
 #define _mm512_exp2a23_round_pd(A, R) \
-  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)_mm512_setzero_pd(), \
+                                       (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
-  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
-                                      (int)(R))
+  ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                       (int)(R)))
 
 #define _mm512_maskz_exp2a23_round_pd(M, A, R) \
-  (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)(M), (int)(R))
+  ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+                                       (__v8df)_mm512_setzero_pd(), \
+                                       (__mmask8)(M), (int)(R)))
 
 #define _mm512_exp2a23_pd(A) \
   _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -39,19 +39,19 @@
   _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_exp2a23_round_ps(A, R) \
-  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)_mm512_setzero_ps(), \
+                                      (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
-  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
-                                     (int)(R))
+  ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                      (int)(R)))
 
 #define _mm512_maskz_exp2a23_round_ps(M, A, R) \
-  (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)(M), (int)(R))
+  ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+                                      (__v16sf)_mm512_setzero_ps(), \
+                                      (__mmask16)(M), (int)(R)))
 
 #define _mm512_exp2a23_ps(A) \
   _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -64,19 +64,19 @@
 
 /* rsqrt28 */
 #define _mm512_rsqrt28_round_pd(A, R) \
-  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
-  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
-                                         (int)(R))
+  ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                          (int)(R)))
 
 #define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
-  (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(M), (int)(R))
+  ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)_mm512_setzero_pd(), \
+                                          (__mmask8)(M), (int)(R)))
 
 #define _mm512_rsqrt28_pd(A) \
   _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -88,19 +88,19 @@
   _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_rsqrt28_round_ps(A, R) \
-  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
-  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
-                                        (int)(R))
+  ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                         (int)(R)))
 
 #define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
-  (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)(M), (int)(R))
+  ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+                                         (__v16sf)_mm512_setzero_ps(), \
+                                         (__mmask16)(M), (int)(R)))
 
 #define _mm512_rsqrt28_ps(A) \
   _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -112,22 +112,22 @@
   _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rsqrt28_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
-  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)(__m128)(S), \
-                                              (__mmask8)(M), (int)(R))
+  ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)(__m128)(S), \
+                                               (__mmask8)(M), (int)(R)))
 
 #define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
-  (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(M), (int)(R))
+  ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(M), (int)(R)))
 
 #define _mm_rsqrt28_ss(A, B) \
   _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -139,22 +139,22 @@
   _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rsqrt28_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
-  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)(__m128d)(S), \
-                                               (__mmask8)(M), (int)(R))
+  ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)(__m128d)(S), \
+                                                (__mmask8)(M), (int)(R)))
 
 #define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
-  (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(M), (int)(R))
+  ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(M), (int)(R)))
 
 #define _mm_rsqrt28_sd(A, B) \
   _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -167,19 +167,19 @@
 
 /* rcp28 */
 #define _mm512_rcp28_round_pd(A, R) \
-  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_rcp28_round_pd(S, M, A, R) \
-  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
-                                       (int)(R))
+  ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                        (int)(R)))
 
 #define _mm512_maskz_rcp28_round_pd(M, A, R) \
-  (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)(M), (int)(R))
+  ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+                                        (__v8df)_mm512_setzero_pd(), \
+                                        (__mmask8)(M), (int)(R)))
 
 #define _mm512_rcp28_pd(A) \
   _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -191,19 +191,19 @@
   _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_rcp28_round_ps(A, R) \
-  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_rcp28_round_ps(S, M, A, R) \
-  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
-                                      (int)(R))
+  ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                       (int)(R)))
 
 #define _mm512_maskz_rcp28_round_ps(M, A, R) \
-  (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)(M), (int)(R))
+  ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+                                       (__v16sf)_mm512_setzero_ps(), \
+                                       (__mmask16)(M), (int)(R)))
 
 #define _mm512_rcp28_ps(A) \
   _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -215,22 +215,22 @@
   _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rcp28_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
-  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)(__m128)(S), \
-                                            (__mmask8)(M), (int)(R))
+  ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)(__m128)(S), \
+                                             (__mmask8)(M), (int)(R)))
 
 #define _mm_maskz_rcp28_round_ss(M, A, B, R) \
-  (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4sf)_mm_setzero_ps(), \
-                                            (__mmask8)(M), (int)(R))
+  ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4sf)_mm_setzero_ps(), \
+                                             (__mmask8)(M), (int)(R)))
 
 #define _mm_rcp28_ss(A, B) \
   _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -242,22 +242,22 @@
   _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm_rcp28_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
-  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)(__m128d)(S), \
-                                             (__mmask8)(M), (int)(R))
+  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)(__m128d)(S), \
+                                              (__mmask8)(M), (int)(R)))
 
 #define _mm_maskz_rcp28_round_sd(M, A, B, R) \
-  (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2df)_mm_setzero_pd(), \
-                                             (__mmask8)(M), (int)(R))
+  ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2df)_mm_setzero_pd(), \
+                                              (__mmask8)(M), (int)(R)))
 
 #define _mm_rcp28_sd(A, B) \
   _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
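
[Editor's note - not part of the patch. The non-round convenience macros
in this header, such as _mm512_rcp28_pd and _mm_rcp28_sd, need no edit of
their own: each expands to an invocation of the corresponding *_round_*
macro, e.g.

  #define _mm512_rcp28_pd(A) \
    _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)

so once the *_round_* macro gains its outer parentheses, the wrapper's
expansion is already a fully parenthesized expression.]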

diff  --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 010bcadab019..df298640523b 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -937,18 +937,18 @@ _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
 }
 
 #define _mm512_max_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_max_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
+                                   (__v8df)(W)))
 
 #define _mm512_maskz_max_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 static  __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_max_pd(__m512d __A, __m512d __B)
@@ -974,18 +974,18 @@ _mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
 }
 
 #define _mm512_max_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_max_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
+                                  (__v16sf)(W)))
 
 #define _mm512_maskz_max_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 static  __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_max_ps(__m512 __A, __m512 __B)
@@ -1029,22 +1029,22 @@ _mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
 }
 
 #define _mm_max_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_max_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_max_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1065,22 +1065,22 @@ _mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
 }
 
 #define _mm_max_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_max_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_max_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline __m512i
 __DEFAULT_FN_ATTRS512
@@ -1172,18 +1172,18 @@ _mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
 }
 
 #define _mm512_min_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_min_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
+                                   (__v8df)(W)))
 
 #define _mm512_maskz_min_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 static  __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_min_pd(__m512d __A, __m512d __B)
@@ -1209,18 +1209,18 @@ _mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
 }
 
 #define _mm512_min_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_min_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
+                                  (__v16sf)(W)))
 
 #define _mm512_maskz_min_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 static  __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_min_ps(__m512 __A, __m512 __B)
@@ -1264,22 +1264,22 @@ _mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
 }
 
 #define _mm_min_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_min_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_min_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1300,22 +1300,22 @@ _mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
 }
 
 #define _mm_min_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_min_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_min_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline __m512i
 __DEFAULT_FN_ATTRS512
@@ -1485,17 +1485,17 @@ _mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
 }
 
 #define _mm512_sqrt_round_pd(A, R) \
-  (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
+  ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))
 
 #define _mm512_mask_sqrt_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)(__m512d)(W))
+                                       (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_sqrt_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)_mm512_setzero_pd())
+                                       (__v8df)_mm512_setzero_pd()))
 
 static  __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_sqrt_pd(__m512d __A)
@@ -1521,17 +1521,17 @@ _mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
 }
 
 #define _mm512_sqrt_round_ps(A, R) \
-  (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
+  ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))
 
 #define _mm512_mask_sqrt_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)(__m512)(W))
+                                      (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_sqrt_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)_mm512_setzero_ps())
+                                      (__v16sf)_mm512_setzero_ps()))
 
 static  __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_sqrt_ps(__m512 __A)
@@ -1900,22 +1900,22 @@ _mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
 }
 
 #define _mm_add_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_add_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_add_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1929,22 +1929,22 @@ _mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
   return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
 }
 #define _mm_add_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_add_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_add_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1975,32 +1975,32 @@ _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
 }
 
 #define _mm512_add_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_add_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
+                                   (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_add_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_add_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_add_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
+                                  (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_add_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2014,22 +2014,22 @@ _mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
   return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
 }
 #define _mm_sub_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_sub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_sub_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2044,22 +2044,22 @@ _mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
 }
 
 #define _mm_sub_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_sub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_sub_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
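/* Note the pattern in the scalar forms above (a reading of the expansions,
 * not new behavior): the unmasked variant passes (__mmask8)-1, so the
 * _mm_setzero_*() passthrough operand is never selected, and only bit 0
 * of U matters, since element 0 is the sole masked lane. Hypothetical
 * equivalence, illustration only:
 *
 *   _mm_sub_round_sd(a, b, R)
 *     == _mm_mask_sub_round_sd(w, (__mmask8)-1, a, b, R)   // any w
 */
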
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2090,32 +2090,32 @@ _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
 }
 
 #define _mm512_sub_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_sub_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
+                                   (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_sub_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_sub_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_sub_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
+                                  (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_sub_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2129,22 +2129,22 @@ _mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
   return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
 }
 #define _mm_mul_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_mul_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_mul_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2159,22 +2159,22 @@ _mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
 }
 
 #define _mm_mul_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_mul_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_mul_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2205,32 +2205,32 @@ _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
 }
 
 #define _mm512_mul_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_mul_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
+                                   (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_mul_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_mul_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_mul_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
+                                  (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_mul_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2245,22 +2245,22 @@ _mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
 }
 
 #define _mm_div_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_div_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm_maskz_div_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2275,22 +2275,22 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
 }
 
 #define _mm_div_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_div_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_div_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline __m512d __DEFAULT_FN_ATTRS512
 _mm512_div_pd(__m512d __a, __m512d __b)
@@ -2333,179 +2333,179 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
 }
 
 #define _mm512_div_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
+  ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
+                                    (__v8df)(__m512d)(B), (int)(R)))
 
 #define _mm512_mask_div_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
+                                   (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_div_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                    (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
+                                   (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_div_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
+  ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
+                                   (__v16sf)(__m512)(B), (int)(R)))
 
 #define _mm512_mask_div_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
+                                  (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_div_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                   (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
+                                  (__v16sf)_mm512_setzero_ps()))
 
 #define _mm512_roundscale_ps(A, B) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
+                                          (__v16sf)_mm512_undefined_ps(), \
+                                          (__mmask16)-1, \
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_roundscale_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
                                          (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         _MM_FROUND_CUR_DIRECTION)
+                                         _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_roundscale_ps(A, B, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), \
-                                         _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(A), \
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
                                          (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         (int)(R))
+                                         (int)(R)))
 
 #define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), (int)(R))
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+                                          (__v16sf)_mm512_setzero_ps(), \
+                                          (__mmask16)(A), (int)(R)))
 
 #define _mm512_roundscale_round_ps(A, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                          (__v16sf)_mm512_undefined_ps(), \
+                                          (__mmask16)-1, (int)(R)))
 
 #define _mm512_roundscale_pd(A, B) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1, \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_roundscale_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
                                           (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          _MM_FROUND_CUR_DIRECTION)
+                                          _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_roundscale_pd(A, B, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), \
-                                          _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(A), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
                                           (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          (int)(R))
+                                          (int)(R)))
 
 #define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), (int)(R))
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+                                           (__v8df)_mm512_setzero_pd(), \
+                                           (__mmask8)(A), (int)(R)))
 
 #define _mm512_roundscale_round_pd(A, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                           (__v8df)_mm512_undefined_pd(), \
+                                           (__mmask8)-1, (int)(R)))
 
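/* As the expansions above show, the roundscale forms without a _round_
 * suffix simply pin the rounding argument to _MM_FROUND_CUR_DIRECTION
 * (round using the current MXCSR mode). Illustrative equivalence only:
 *
 *   _mm512_roundscale_pd(v, imm)
 *     == _mm512_roundscale_round_pd(v, imm, _MM_FROUND_CUR_DIRECTION)
 */
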
 #define _mm512_fmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_fmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             -(__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_fnmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_fnmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             -(__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
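/* All of the fused forms above funnel into the same vfmaddpd512 builtins;
 * the fmsub/fnmadd/fnmsub spellings just negate an operand at the call
 * site, e.g. fmsub(a, b, c) = fma(a, b, -c). A hedged sketch,
 * illustration only: */
#if 0 /* illustration only */
__m512d r1 = _mm512_fmsub_round_pd(a, b, c, _MM_FROUND_CUR_DIRECTION);
__m512d r2 = _mm512_fmadd_round_pd(a, b, -c, _MM_FROUND_CUR_DIRECTION);
/* r1 and r2 compute the same fused a*b - c, lane for lane. */
#endif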
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2629,87 +2629,87 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
 }
 
 #define _mm512_fmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_fmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            -(__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_fnmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           -(__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_fnmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           -(__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            -(__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -2833,52 +2833,52 @@ _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
 }
 
 #define _mm512_fmaddsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8df)(__m512d)(C), \
+                                               (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+                                                (__v8df)(__m512d)(B), \
+                                                (__v8df)(__m512d)(C), \
+                                                (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+                                                (__v8df)(__m512d)(B), \
+                                                (__v8df)(__m512d)(C), \
+                                                (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_fmsubadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               -(__v8df)(__m512d)(C), \
+                                               (__mmask8)-1, (int)(R)))
 
 
 #define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               -(__v8df)(__m512d)(C), \
+                                               (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+                                                (__v8df)(__m512d)(B), \
+                                                -(__v8df)(__m512d)(C), \
+                                                (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2952,52 +2952,52 @@ _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
 }
 
 #define _mm512_fmaddsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16sf)(__m512)(C), \
+                                              (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+                                               (__v16sf)(__m512)(B), \
+                                               (__v16sf)(__m512)(C), \
+                                               (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+                                               (__v16sf)(__m512)(B), \
+                                               (__v16sf)(__m512)(C), \
+                                               (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_fmsubadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              -(__v16sf)(__m512)(C), \
+                                              (__mmask16)-1, (int)(R)))
 
 
 #define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              -(__v16sf)(__m512)(C), \
+                                              (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+                                               (__v16sf)(__m512)(B), \
+                                               -(__v16sf)(__m512)(C), \
+                                               (__mmask16)(U), (int)(R)))
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3071,10 +3071,10 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
 }
 
 #define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3088,10 +3088,10 @@ _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
 }
 
 #define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
@@ -3104,10 +3104,10 @@ _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
 }
 
 #define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+                                                (__v8df)(__m512d)(B), \
+                                                (__v8df)(__m512d)(C), \
+                                                (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3121,10 +3121,10 @@ _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
 }
 
 #define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+                                               (__v16sf)(__m512)(B), \
+                                               (__v16sf)(__m512)(C), \
+                                               (__mmask16)(U), (int)(R)))
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3138,10 +3138,10 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
 }
 
 #define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            -(__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3155,10 +3155,10 @@ _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
 }
 
 #define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           -(__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)))
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3172,17 +3172,17 @@ _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
 }
 
 #define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+                                            -(__v8df)(__m512d)(B), \
+                                            -(__v8df)(__m512d)(C), \
+                                            (__mmask8)(U), (int)(R)))
 
 
 #define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
+                                             (__v8df)(__m512d)(B), \
+                                             (__v8df)(__m512d)(C), \
+                                             (__mmask8)(U), (int)(R)))
 
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3206,17 +3206,17 @@ _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
 }
 
 #define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+                                           -(__v16sf)(__m512)(B), \
+                                           -(__v16sf)(__m512)(C), \
+                                           (__mmask16)(U), (int)(R)))
 
 
 #define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
+                                            (__v16sf)(__m512)(B), \
+                                            (__v16sf)(__m512)(C), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3312,63 +3312,63 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
 }
 
 #define _mm512_alignr_epi64(A, B, I) \
-  (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
-                                    (__v8di)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
+                                     (__v8di)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+                                  (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_alignr_epi64(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                  (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+                                  (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_alignr_epi32(A, B, I) \
-  (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
-                                    (__v16si)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
+                                     (__v16si)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+                                 (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_alignr_epi32(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                 (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+                                 (__v16si)_mm512_setzero_si512()))
 /* Vector Extract */
 
 #define _mm512_extractf64x4_pd(A, I) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                            (__v4df)_mm256_undefined_pd(), \
-                                            (__mmask8)-1)
+  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+                                             (__v4df)_mm256_undefined_pd(), \
+                                             (__mmask8)-1))
 
 #define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)(__m256d)(W), \
-                                            (__mmask8)(U))
+  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                             (__v4df)(__m256d)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm512_maskz_extractf64x4_pd(U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)_mm256_setzero_pd(), \
-                                            (__mmask8)(U))
+  ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+                                             (__v4df)_mm256_setzero_pd(), \
+                                             (__mmask8)(U)))
 
 #define _mm512_extractf32x4_ps(A, I) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
-                                           (__v4sf)_mm_undefined_ps(), \
-                                           (__mmask8)-1)
+  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+                                            (__v4sf)_mm_undefined_ps(), \
+                                            (__mmask8)-1))
 
 #define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)(__m128)(W), \
-                                           (__mmask8)(U))
+  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                            (__v4sf)(__m128)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm512_maskz_extractf32x4_ps(U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U))
+  ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(U)))
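
A brief usage sketch for the extract family (assuming AVX-512F; the index
must be a compile-time constant selecting one of the four 128-bit lanes):

  #include <immintrin.h>

  __m128 low_lane(__m512 v) {
    /* Lane 0 is the least-significant 128 bits of v. */
    return _mm512_extractf32x4_ps(v, 0);
  }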
 
 /* Vector Blend */
 
@@ -3407,14 +3407,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
 /* Compare */
 
 #define _mm512_cmp_round_ps_mask(A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)-1, (int)(R))
+  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), (int)(P), \
+                                           (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)(U), (int)(R))
+  ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), (int)(P), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_cmp_ps_mask(A, B, P) \
   _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
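
A usage sketch for the explicit-exception-control form (assuming AVX-512F;
the predicate and rounding constants come from <immintrin.h>, and the
function name is illustrative):

  #include <immintrin.h>

  __mmask16 ordered_lanes(__m512 a, __m512 b) {
    /* _CMP_ORD_Q is true for lanes where neither operand is NaN;
       _MM_FROUND_NO_EXC (SAE) suppresses exception reporting. */
    return _mm512_cmp_round_ps_mask(a, b, _CMP_ORD_Q, _MM_FROUND_NO_EXC);
  }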
@@ -3462,14 +3462,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
     _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
 
 #define _mm512_cmp_round_pd_mask(A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)-1, (int)(R))
+  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(P), \
+                                          (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)(U), (int)(R))
+  ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+                                          (__v8df)(__m512d)(B), (int)(P), \
+                                          (__mmask8)(U), (int)(R)))
 
 #define _mm512_cmp_pd_mask(A, B, P) \
   _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3519,19 +3519,19 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
 /* Conversion */
 
 #define _mm512_cvtt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_undefined_epi32(), \
-                                             (__mmask16)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                              (__v16si)_mm512_undefined_epi32(), \
+                                              (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                              (__v16si)(__m512i)(W), \
+                                              (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+                                              (__v16si)_mm512_setzero_si512(), \
+                                              (__mmask16)(U), (int)(R)))
 
 
 static __inline __m512i __DEFAULT_FN_ATTRS512
@@ -3563,34 +3563,34 @@ _mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
 }
 
 #define _mm512_cvt_roundepi32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_cvt_roundepu32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+                                            (__v16sf)_mm512_setzero_ps(), \
+                                            (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+                                            (__v16sf)(__m512)(W), \
+                                            (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+                                            (__v16sf)_mm512_setzero_ps(), \
+                                            (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_cvtepu32_ps (__m512i __A)
@@ -3705,19 +3705,19 @@ _mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
 }
 
 #define _mm512_cvt_roundpd_ps(A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+                                           (__v8sf)(__m256)(W), (__mmask8)(U), \
+                                           (int)(R)))
 
 #define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS512
 _mm512_cvtpd_ps (__m512d __A)
@@ -3765,38 +3765,38 @@ _mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
 }
 
 #define _mm512_cvt_roundps_ph(A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_undefined_si256(), \
-                                            (__mmask16)-1)
+  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                             (__v16hi)_mm256_undefined_si256(), \
+                                             (__mmask16)-1))
 
 #define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)(__m256i)(U), \
-                                            (__mmask16)(W))
+  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                             (__v16hi)(__m256i)(U), \
+                                             (__mmask16)(W)))
 
 #define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_setzero_si256(), \
-                                            (__mmask16)(W))
+  ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+                                             (__v16hi)_mm256_setzero_si256(), \
+                                             (__mmask16)(W)))
 
 #define _mm512_cvtps_ph       _mm512_cvt_roundps_ph
 #define _mm512_mask_cvtps_ph  _mm512_mask_cvt_roundps_ph
 #define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
 
 #define _mm512_cvt_roundph_ps(A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                            (__v16sf)_mm512_undefined_ps(), \
+                                            (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                            (__v16sf)(__m512)(W), \
+                                            (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundph_ps(U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+                                            (__v16sf)_mm512_setzero_ps(), \
+                                            (__mmask16)(U), (int)(R)))
 
 
 static  __inline __m512 __DEFAULT_FN_ATTRS512
@@ -3828,19 +3828,19 @@ _mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
 }
 
 #define _mm512_cvtt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)(__m256i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline __m256i __DEFAULT_FN_ATTRS512
 _mm512_cvttpd_epi32(__m512d __a)
@@ -3870,19 +3870,19 @@ _mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
 }
 
 #define _mm512_cvtt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_setzero_si512(), \
+                                             (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)(__m512i)(W), \
+                                             (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_setzero_si512(), \
+                                             (__mmask16)(U), (int)(R)))
 
 static __inline __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvttps_epi32(__m512 __a)
@@ -3912,19 +3912,19 @@ _mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
 }
 
 #define _mm512_cvt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)_mm512_setzero_si512(), \
+                                            (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)(__m512i)(W), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)(__m512i)(W), \
+                                            (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+                                            (__v16si)_mm512_setzero_si512(), \
+                                            (__mmask16)(U), (int)(R)))
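
The rounding-mode conversions let a caller override the current MXCSR
rounding for a single instruction. A usage sketch (assuming AVX-512F):

  #include <immintrin.h>

  __m512i round_nearest(__m512 x) {
    /* Convert with round-to-nearest-even and no exception reporting,
       regardless of the rounding mode currently set in MXCSR. */
    return _mm512_cvt_roundps_epi32(
        x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  }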
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvtps_epi32 (__m512 __A)
@@ -3955,19 +3955,19 @@ _mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
 }
 
 #define _mm512_cvt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)(__m256i)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)(__m256i)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+                                            (__v8si)_mm256_setzero_si256(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS512
 _mm512_cvtpd_epi32 (__m512d __A)
@@ -3999,19 +3999,19 @@ _mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
 }
 
 #define _mm512_cvt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_setzero_si512(), \
+                                             (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)(__m512i)(W), \
+                                             (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
+  ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+                                             (__v16si)_mm512_setzero_si512(), \
+                                             (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_cvtps_epu32 ( __m512 __A)
@@ -4043,19 +4043,19 @@ _mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A)
 }
 
 #define _mm512_cvt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)(__m256i)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+                                             (__v8si)_mm256_setzero_si256(), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS512
 _mm512_cvtpd_epu32 (__m512d __A)
@@ -4975,70 +4975,70 @@ _mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
 
 
 #define _mm512_cmp_epi32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+                                          (__v16si)(__m512i)(b), (int)(p), \
+                                          (__mmask16)-1))
 
 #define _mm512_cmp_epu32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+                                           (__v16si)(__m512i)(b), (int)(p), \
+                                           (__mmask16)-1))
 
 #define _mm512_cmp_epi64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+                                         (__v8di)(__m512i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm512_cmp_epu64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+                                          (__v8di)(__m512i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+                                          (__v16si)(__m512i)(b), (int)(p), \
+                                          (__mmask16)(m)))
 
 #define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+                                           (__v16si)(__m512i)(b), (int)(p), \
+                                           (__mmask16)(m)))
 
 #define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+                                         (__v8di)(__m512i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+                                          (__v8di)(__m512i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm512_rol_epi32(a, b) \
-  (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
+  ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)))
 
 #define _mm512_mask_rol_epi32(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_rol_epi32((a), (b)), \
+                                       (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_rol_epi32(U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_rol_epi32((a), (b)), \
+                                       (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_rol_epi64(a, b) \
-  (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
+  ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)))
 
 #define _mm512_mask_rol_epi64(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_rol_epi64((a), (b)), \
+                                       (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_rol_epi64(U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_rol_epi64((a), (b)), \
+                                       (__v8di)_mm512_setzero_si512()))
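
The rotate macros take the count as an immediate. A short sketch (assuming
AVX-512F):

  #include <immintrin.h>

  __m512i rotl3(__m512i v) {
    /* Rotate each 64-bit lane left by 3 bits. */
    return _mm512_rol_epi64(v, 3);
  }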
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_rolv_epi32 (__m512i __A, __m512i __B)
@@ -5085,30 +5085,30 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
 }
 
 #define _mm512_ror_epi32(A, B) \
-  (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
+  ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)))
 
 #define _mm512_mask_ror_epi32(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_ror_epi32((A), (B)), \
+                                       (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_ror_epi32(U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_ror_epi32((A), (B)), \
+                                       (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_ror_epi64(A, B) \
-  (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
+  ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)))
 
 #define _mm512_mask_ror_epi64(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_ror_epi64((A), (B)), \
+                                       (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_ror_epi64(U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_ror_epi64((A), (B)), \
+                                       (__v8di)_mm512_setzero_si512()))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_slli_epi32(__m512i __A, unsigned int __B)
@@ -5304,168 +5304,168 @@ _mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
 }
 
 #define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), (int)(imm), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), (int)(imm), \
+                                              (__mmask8)(U), (int)(R)))
 
 #define _mm512_fixupimm_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, \
-                                             _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), (int)(imm), \
+                                              (__mmask8)-1, \
+                                              _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+                                              (__v8df)(__m512d)(B), \
+                                              (__v8di)(__m512i)(C), (int)(imm), \
+                                              (__mmask8)(U), \
+                                              _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              (int)(R))
+  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8di)(__m512i)(C), \
+                                               (int)(imm), (__mmask8)(U), \
+                                               (int)(R)))
 
 #define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+                                               (__v8df)(__m512d)(B), \
+                                               (__v8di)(__m512i)(C), \
+                                               (int)(imm), (__mmask8)(U), \
+                                               _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)(U), (int)(R)))
 
 #define _mm512_fixupimm_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)-1, \
+                                             _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+                                             (__v16sf)(__m512)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)(U), \
+                                             _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             (int)(R))
+  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16si)(__m512i)(C), \
+                                              (int)(imm), (__mmask16)(U), \
+                                              (int)(R)))
 
 #define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+                                              (__v16sf)(__m512)(B), \
+                                              (__v16si)(__m512i)(C), \
+                                              (int)(imm), (__mmask16)(U), \
+                                              _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_fixupimm_round_sd(A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2di)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2di)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)(U), (int)(R)))
 
 #define _mm_fixupimm_sd(A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R))
+                                           (__mmask8)-1, \
+                                           _MM_FROUND_CUR_DIRECTION))
 
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
+  ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
                                            (__v2df)(__m128d)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
+                                           _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
+  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2di)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
+  ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+                                            (__v2df)(__m128d)(B), \
+                                            (__v2di)(__m128i)(C), (int)(imm), \
+                                            (__mmask8)(U), \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_fixupimm_round_ss(A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4si)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4si)(__m128i)(C), (int)(imm), \
+                                          (__mmask8)(U), (int)(R)))
 
 #define _mm_fixupimm_ss(A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
+                                          (__mmask8)-1, \
+                                          _MM_FROUND_CUR_DIRECTION))
 
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
+  ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
                                           (__v4sf)(__m128)(B), \
                                           (__v4si)(__m128i)(C), (int)(imm), \
                                           (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
+                                          _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
+  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4si)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
+  ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+                                           (__v4sf)(__m128)(B), \
+                                           (__v4si)(__m128i)(C), (int)(imm), \
+                                           (__mmask8)(U), \
+                                           _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_getexp_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                  (__v2df)(__m128d)(B), \
+                                                  (__v2df)_mm_setzero_pd(), \
+                                                  (__mmask8)-1, (int)(R)))
 
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -5486,10 +5486,10 @@ _mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_mask_getexp_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                  (__v2df)(__m128d)(B), \
+                                                  (__v2df)(__m128d)(W), \
+                                                  (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -5502,16 +5502,16 @@ _mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_maskz_getexp_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+                                                  (__v2df)(__m128d)(B), \
+                                                  (__v2df)_mm_setzero_pd(), \
+                                                  (__mmask8)(U), (int)(R)))
 
 #define _mm_getexp_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                 (__v4sf)(__m128)(B), \
+                                                 (__v4sf)_mm_setzero_ps(), \
+                                                 (__mmask8)-1, (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_getexp_ss (__m128 __A, __m128 __B)
@@ -5531,10 +5531,10 @@ _mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_mask_getexp_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                 (__v4sf)(__m128)(B), \
+                                                 (__v4sf)(__m128)(W), \
+                                                 (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -5547,100 +5547,100 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_maskz_getexp_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+                                                 (__v4sf)(__m128)(B), \
+                                                 (__v4sf)_mm_setzero_ps(), \
+                                                 (__mmask8)(U), (int)(R)))
 
 #define _mm_getmant_round_sd(A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1, (int)(R)))
 
 #define _mm_getmant_sd(A, B, C, D)  \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)-1, \
+                                                _MM_FROUND_CUR_DIRECTION))
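
In the getmant macros the two immediate arguments are packed into a single
imm8, with the normalization-interval selector in bits 1:0 and the sign
control in bits 3:2; hence the (int)(((D)<<2) | (C)) expression. A usage
sketch (assuming AVX-512F; the enum constants come from <immintrin.h>):

  #include <immintrin.h>

  __m128d mant(__m128d a, __m128d b) {
    /* imm8 == (_MM_MANT_SIGN_zero << 2) | _MM_MANT_NORM_1_2: normalize
       the mantissa of b's low lane into [1, 2) with the sign cleared;
       the upper lane is taken from a. */
    return _mm_getmant_sd(a, b, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero);
  }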
 
 #define _mm_mask_getmant_sd(W, U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U), \
+                                                _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)(__m128d)(W), \
+                                                (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_getmant_sd(U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U), \
+                                                _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+                                                (__v2df)(__m128d)(B), \
+                                                (int)(((D)<<2) | (C)), \
+                                                (__v2df)_mm_setzero_pd(), \
+                                                (__mmask8)(U), (int)(R)))
 
-#define _mm_getmant_round_ss(A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R))
+#define _mm_getmant_round_ss(A, B, C, D, R) \
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1, (int)(R)))
 
 #define _mm_getmant_ss(A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, \
-                                              _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)-1, \
+                                               _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_getmant_ss(W, U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)(__m128)(W), \
+                                               (__mmask8)(U), \
+                                               _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)(__m128)(W), \
+                                               (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_getmant_ss(U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U), \
+                                               _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (int)(((D)<<2) | (C)), \
+                                               (__v4sf)_mm_setzero_ps(), \
+                                               (__mmask8)(U), (int)(R)))
 
 static __inline__ __mmask16 __DEFAULT_FN_ATTRS
 _mm512_kmov (__mmask16 __A)
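
A note on a pattern visible throughout these hunks: the unmasked variants
(e.g. _mm_getmant_sd) call the same masked builtin as the masked forms but
pass (__mmask8)-1 as the write mask. Casting -1 to the mask type sets every
bit, so all lanes are selected and the passthrough vector (here
_mm_setzero_pd()) is never observed. A minimal sketch of the idiom --
mask8 and masked_copy below are hypothetical stand-ins, not the real mask
type or builtin:

    #include <stdio.h>

    typedef unsigned char mask8;   /* stand-in for __mmask8 */

    /* Hypothetical masked op: per lane, take src if the mask bit is
       set, else keep the passthrough value. */
    static void masked_copy(int dst[8], const int src[8],
                            const int passthru[8], mask8 m) {
      for (int i = 0; i < 8; ++i)
        dst[i] = ((m >> i) & 1) ? src[i] : passthru[i];
    }

    int main(void) {
      int src[8] = {1,2,3,4,5,6,7,8}, zero[8] = {0}, dst[8];
      masked_copy(dst, src, zero, (mask8)-1);   /* -1 -> 0xff: all lanes */
      for (int i = 0; i < 8; ++i)
        printf("%d ", dst[i]);                  /* prints 1..8 */
      printf("\n");
      return 0;
    }
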
@@ -5649,16 +5649,16 @@ _mm512_kmov (__mmask16 __A)
 }
 
 #define _mm_comi_round_sd(A, B, P, R) \
-  (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
-                              (int)(P), (int)(R))
+  ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
+                               (int)(P), (int)(R)))
 
 #define _mm_comi_round_ss(A, B, P, R) \
-  (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
-                              (int)(P), (int)(R))
+  ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+                               (int)(P), (int)(R)))
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
 #endif
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
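
For context on why the outer parentheses matter in macros such as
_mm_comi_round_sd and _mm_cvt_roundsd_si64 above: in C, postfix operators
bind tighter than a cast, and `sizeof (T)expr` greedily parses as
sizeof(type-name). A macro whose replacement begins with a bare cast can
therefore misparse in ordinary-looking caller code. A self-contained
sketch of both failure modes -- VEC, f and g are hypothetical stand-ins,
not the real intrinsic types or builtins:

    #include <stddef.h>

    typedef double VEC __attribute__((vector_size(64)));
    static VEC f(VEC x) { return x + x; }       /* plays a vector builtin */
    static int g(VEC x) { return (int)x[0]; }   /* plays an int builtin   */

    #define CAST_ONLY(x)   (VEC)f(x)       /* old style: bare cast           */
    #define PARENED(x)     ((VEC)f(x))     /* new style: fully parenthesized */
    #define ICAST_ONLY(x)  (int)g(x)
    #define IPARENED(x)    ((int)g(x))

    double lane0(VEC v) {
      /* CAST_ONLY(v)[0] would expand to (VEC)f(v)[0]: the subscript
         binds to f(v) first, so the cast is applied to a double --
         an error. */
      return PARENED(v)[0];                /* ((VEC)f(v))[0], as intended */
    }

    size_t width(VEC v) {
      /* sizeof ICAST_ONLY(v) would expand to sizeof (int)g(v), which
         parses as sizeof(int) followed by a stray g(v) -- a syntax
         error. */
      return sizeof IPARENED(v);           /* sizeof ((int)g(v)): fine */
    }

With the extra parentheses each expansion is a single primary expression,
so it composes with subscripting, sizeof and the rest of the postfix
grammar the way a real function call would.
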
@@ -5926,54 +5926,54 @@ _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
 }
 
 #define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1)
+  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+                                             (__v16si)(__m512i)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)-1))
 
 #define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U))
+  ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
+                                             (__v16si)(__m512i)(B), \
+                                             (__v16si)(__m512i)(C), (int)(imm), \
+                                             (__mmask16)(U)))
 
 #define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U))
+  ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
+                                              (__v16si)(__m512i)(B), \
+                                              (__v16si)(__m512i)(C), \
+                                              (int)(imm), (__mmask16)(U)))
 
 #define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
+                                             (__v8di)(__m512i)(B), \
+                                             (__v8di)(__m512i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
+  ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
                                              (__v8di)(__m512i)(B), \
                                              (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+  ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
+                                              (__v8di)(__m512i)(B), \
+                                              (__v8di)(__m512i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
 #endif
 
 #define _mm_cvt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
 
 #define _mm_cvt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+  ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
 
 #define _mm_cvt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
+  ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)))
 
 static __inline__ unsigned __DEFAULT_FN_ATTRS128
 _mm_cvtsd_u32 (__m128d __A)
@@ -5984,8 +5984,8 @@ _mm_cvtsd_u32 (__m128d __A)
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
-                                                  (int)(R))
+  ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
+                                                   (int)(R)))
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
 _mm_cvtsd_u64 (__m128d __A)
@@ -5997,21 +5997,21 @@ _mm_cvtsd_u64 (__m128d __A)
 #endif
 
 #define _mm_cvt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
 
 #define _mm_cvt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+  ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
 
 #ifdef __x86_64__
 #define _mm_cvt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
 
 #define _mm_cvt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
 #endif
 
 #define _mm_cvt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
+  ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)))
 
 static __inline__ unsigned __DEFAULT_FN_ATTRS128
 _mm_cvtss_u32 (__m128 __A)
@@ -6022,8 +6022,8 @@ _mm_cvtss_u32 (__m128 __A)
 
 #ifdef __x86_64__
 #define _mm_cvt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
-                                                  (int)(R))
+  ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
+                                                   (int)(R)))
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
 _mm_cvtss_u64 (__m128 __A)
@@ -6035,10 +6035,10 @@ _mm_cvtss_u64 (__m128 __A)
 #endif
 
 #define _mm_cvtt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
 
 #define _mm_cvtt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+  ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
 
 static __inline__ int __DEFAULT_FN_ATTRS128
 _mm_cvttsd_i32 (__m128d __A)
@@ -6049,10 +6049,10 @@ _mm_cvttsd_i32 (__m128d __A)
 
 #ifdef __x86_64__
 #define _mm_cvtt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
 
 #define _mm_cvtt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
 
 static __inline__ long long __DEFAULT_FN_ATTRS128
 _mm_cvttsd_i64 (__m128d __A)
@@ -6063,7 +6063,7 @@ _mm_cvttsd_i64 (__m128d __A)
 #endif
 
 #define _mm_cvtt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
+  ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)))
 
 static __inline__ unsigned __DEFAULT_FN_ATTRS128
 _mm_cvttsd_u32 (__m128d __A)
@@ -6074,8 +6074,8 @@ _mm_cvttsd_u32 (__m128d __A)
 
 #ifdef __x86_64__
 #define _mm_cvtt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
-                                                   (int)(R))
+  ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
+                                                    (int)(R)))
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
 _mm_cvttsd_u64 (__m128d __A)
@@ -6087,10 +6087,10 @@ _mm_cvttsd_u64 (__m128d __A)
 #endif
 
 #define _mm_cvtt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
 
 #define _mm_cvtt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+  ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
 
 static __inline__ int __DEFAULT_FN_ATTRS128
 _mm_cvttss_i32 (__m128 __A)
@@ -6101,10 +6101,10 @@ _mm_cvttss_i32 (__m128 __A)
 
 #ifdef __x86_64__
 #define _mm_cvtt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
 
 #define _mm_cvtt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+  ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
 
 static __inline__ long long __DEFAULT_FN_ATTRS128
 _mm_cvttss_i64 (__m128 __A)
@@ -6115,7 +6115,7 @@ _mm_cvttss_i64 (__m128 __A)
 #endif
 
 #define _mm_cvtt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
+  ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)))
 
 static __inline__ unsigned __DEFAULT_FN_ATTRS128
 _mm_cvttss_u32 (__m128 __A)
@@ -6126,8 +6126,8 @@ _mm_cvttss_u32 (__m128 __A)
 
 #ifdef __x86_64__
 #define _mm_cvtt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
-                                                   (int)(R))
+  ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
+                                                    (int)(R)))
 
 static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
 _mm_cvttss_u64 (__m128 __A)
@@ -6139,30 +6139,30 @@ _mm_cvttss_u64 (__m128 __A)
 #endif
 
 #define _mm512_permute_pd(X, C) \
-  (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
+  ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)))
 
 #define _mm512_mask_permute_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_permute_pd((X), (C)), \
+                                        (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_permute_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_permute_pd((X), (C)), \
+                                        (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_permute_ps(X, C) \
-  (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
+  ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)))
 
 #define _mm512_mask_permute_ps(W, U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)(__m512)(W))
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_permute_ps((X), (C)), \
+                                       (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_permute_ps(U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)_mm512_setzero_ps())
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_permute_ps((X), (C)), \
+                                       (__v16sf)_mm512_setzero_ps()))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_permutevar_pd(__m512d __A, __m512i __C)
@@ -6274,19 +6274,19 @@ _mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
 
 
 #define _mm512_cvtt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_undefined_si256(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8si)_mm256_undefined_si256(), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8si)(__m256i)(W), \
+                                              (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+                                              (__v8si)_mm256_setzero_si256(), \
+                                              (__mmask8)(U), (int)(R)))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS512
 _mm512_cvttpd_epu32 (__m512d __A)
@@ -6318,106 +6318,106 @@ _mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
 }
 
 #define _mm_roundscale_round_sd(A, B, imm, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)-1, (int)(imm), \
+                                                 (int)(R)))
 
 #define _mm_roundscale_sd(A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)-1, (int)(imm), \
+                                                 _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_roundscale_sd(W, U, A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)(__m128d)(W), \
+                                                 (__mmask8)(U), (int)(imm), \
+                                                 _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)(__m128d)(W), \
+                                                 (__mmask8)(U), (int)(I), \
+                                                 (int)(R)))
 
 #define _mm_maskz_roundscale_sd(U, A, B, I) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION)
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U), (int)(I), \
+                                                 _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
+  ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+                                                 (__v2df)(__m128d)(B), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U), (int)(I), \
+                                                 (int)(R)))
 
 #define _mm_roundscale_round_ss(A, B, imm, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)-1, (int)(imm), \
+                                                (int)(R)))
 
 #define _mm_roundscale_ss(A, B, imm) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)-1, (int)(imm), \
+                                                _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_roundscale_ss(W, U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)(__m128)(W), \
+                                                (__mmask8)(U), (int)(I), \
+                                                _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)(__m128)(W), \
+                                                (__mmask8)(U), (int)(I), \
+                                                (int)(R)))
 
 #define _mm_maskz_roundscale_ss(U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)(U), (int)(I), \
+                                                _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
+  ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+                                                (__v4sf)(__m128)(B), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)(U), (int)(I), \
+                                                (int)(R)))
 
 #define _mm512_scalef_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)_mm512_undefined_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_scalef_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(B), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_scalef_pd (__m512d __A, __m512d __B)
@@ -6452,22 +6452,22 @@ _mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
 }
 
 #define _mm512_scalef_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)_mm512_undefined_ps(), \
+                                           (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_scalef_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(B), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_scalef_ps (__m512 __A, __m512 __B)
@@ -6502,10 +6502,10 @@ _mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
 }
 
 #define _mm_scalef_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_scalef_sd (__m128d __A, __m128d __B)
@@ -6527,10 +6527,10 @@ _mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_mask_scalef_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)(__m128d)(W), \
+                                               (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6543,16 +6543,16 @@ _mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_maskz_scalef_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(U), (int)(R)))
 
 #define _mm_scalef_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_scalef_ss (__m128 __A, __m128 __B)
@@ -6574,10 +6574,10 @@ _mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_mask_scalef_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)(__m128)(W), \
+                                              (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6590,11 +6590,11 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_maskz_scalef_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), \
-                                             (int)(R))
+  ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(U), \
+                                              (int)(R)))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
 _mm512_srai_epi32(__m512i __A, unsigned int __B)
@@ -6642,94 +6642,94 @@ _mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
 }
 
 #define _mm512_shuffle_f32x4(A, B, imm) \
-  (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
-                                    (__v16sf)(__m512)(B), (int)(imm))
+  ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
+                                     (__v16sf)(__m512)(B), (int)(imm)))
 
 #define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)(__m512)(W))
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+                                       (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)_mm512_setzero_ps())
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+                                       (__v16sf)_mm512_setzero_ps()))
 
 #define _mm512_shuffle_f64x2(A, B, imm) \
-  (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
-                                     (__v8df)(__m512d)(B), (int)(imm))
+  ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
+                                      (__v8df)(__m512d)(B), (int)(imm)))
 
 #define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)(__m512d)(W))
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+                                        (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)_mm512_setzero_pd())
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+                                        (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_shuffle_i32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
+                                      (__v16si)(__m512i)(B), (int)(imm)))
 
 #define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+                                       (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+                                       (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_shuffle_i64x2(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
+                                      (__v8di)(__m512i)(B), (int)(imm)))
 
 #define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+                                       (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+                                       (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_shuffle_pd(A, B, M) \
-  (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(M))
+  ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
+                                     (__v8df)(__m512d)(B), (int)(M)))
 
 #define _mm512_mask_shuffle_pd(W, U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)(__m512d)(W))
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                        (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_shuffle_pd(U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)_mm512_setzero_pd())
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                        (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_shuffle_ps(A, B, M) \
-  (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(M))
+  ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
+                                    (__v16sf)(__m512)(B), (int)(M)))
 
 #define _mm512_mask_shuffle_ps(W, U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)(__m512)(W))
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+                                       (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_shuffle_ps(U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)_mm512_setzero_ps())
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                       (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+                                       (__v16sf)_mm512_setzero_ps()))
 
 #define _mm_sqrt_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -6742,10 +6742,10 @@ _mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)(__m128d)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6758,16 +6758,16 @@ _mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_maskz_sqrt_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm_sqrt_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -6780,10 +6780,10 @@ _mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R))
+  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)(__m128)(W), (__mmask8)(U), \
+                                            (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6796,10 +6796,10 @@ _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_maskz_sqrt_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_broadcast_f32x4(__m128 __A)
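
The hygiene rule behind the whole patch generalizes beyond these headers:
when a function-like macro expands to an expression, parenthesizing the
entire replacement keeps it a single expression in every caller context.
A small sketch of the misgrouping the rule prevents, using made-up
WIDEN_* macros rather than anything from the intrinsic headers:

    #include <stdio.h>

    #define WIDEN_BAD(x)   (long)(x) + 0    /* expansion is two operands   */
    #define WIDEN_GOOD(x)  ((long)(x) + 0)  /* expansion is one expression */

    int main(void) {
      /* WIDEN_BAD(1) * 2 expands to (long)(1) + 0 * 2 == 1, not 2. */
      printf("%ld %ld\n", WIDEN_BAD(1) * 2, WIDEN_GOOD(1) * 2);  /* 1 2 */
      return 0;
    }
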
@@ -7366,183 +7366,183 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
 }
 
 #define _mm512_extracti32x4_epi32(A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_undefined_si128(), \
-                                            (__mmask8)-1)
+  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v4si)_mm_undefined_si128(), \
+                                             (__mmask8)-1))
 
 #define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)(__m128i)(W), \
-                                            (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v4si)(__m128i)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_setzero_si128(), \
-                                            (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+                                             (__v4si)_mm_setzero_si128(), \
+                                             (__mmask8)(U)))
 
 #define _mm512_extracti64x4_epi64(A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_undefined_si256(), \
-                                            (__mmask8)-1)
+  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                             (__v4di)_mm256_undefined_si256(), \
+                                             (__mmask8)-1))
 
 #define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)(__m256i)(W), \
-                                            (__mmask8)(U))
+  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                             (__v4di)(__m256i)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_setzero_si256(), \
-                                            (__mmask8)(U))
+  ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+                                             (__v4di)_mm256_setzero_si256(), \
+                                             (__mmask8)(U)))
 
 #define _mm512_insertf64x4(A, B, imm) \
-  (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
-                                      (__v4df)(__m256d)(B), (int)(imm))
+  ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
+                                       (__v4df)(__m256d)(B), (int)(imm)))
 
 #define _mm512_mask_insertf64x4(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)(__m512d)(W))
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+                                   (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_insertf64x4(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)_mm512_setzero_pd())
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                   (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+                                   (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_inserti64x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
-                                      (__v4di)(__m256i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
+                                       (__v4di)(__m256i)(B), (int)(imm)))
 
 #define _mm512_mask_inserti64x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+                                   (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_inserti64x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                   (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+                                   (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_insertf32x4(A, B, imm) \
-  (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
-                                     (__v4sf)(__m128)(B), (int)(imm))
+  ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
+                                      (__v4sf)(__m128)(B), (int)(imm)))
 
 #define _mm512_mask_insertf32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)(__m512)(W))
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+                                  (__v16sf)(__m512)(W)))
 
 #define _mm512_maskz_insertf32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)_mm512_setzero_ps())
+  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+                                  (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+                                  (__v16sf)_mm512_setzero_ps()))
 
 #define _mm512_inserti32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
-                                      (__v4si)(__m128i)(B), (int)(imm))
+  ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
+                                       (__v4si)(__m128i)(B), (int)(imm)))
 
 #define _mm512_mask_inserti32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+                                  (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_inserti32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                  (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+                                  (__v16si)_mm512_setzero_si512()))
 
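(The mask/maskz insert forms above compute the full insert and then route it through a per-lane select builtin: merge masking keeps W's lane where the mask bit is clear, zero masking substitutes zero. A rough usage sketch; the helper name is illustrative:

    #include <immintrin.h>

    /* Lanes 4..7 take the insertf32x4 result; the remaining lanes come
       from w in the merge form and are 0.0f in the zero form. */
    __attribute__((target("avx512f")))
    void insert_demo(__m512 a, __m128 b, __m512 w,
                     __m512 *merged, __m512 *zeroed) {
      __mmask16 m = 0x00F0;
      *merged = _mm512_mask_insertf32x4(w, m, a, b, 1);
      *zeroed = _mm512_maskz_insertf32x4(m, a, b, 1);
    }
)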
 #define _mm512_getmant_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)_mm512_undefined_pd(), \
+                                             (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)(__m512d)(W), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)_mm512_setzero_pd(), \
+                                             (__mmask8)(U), (int)(R)))
 
 #define _mm512_getmant_pd(A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)_mm512_setzero_pd(), \
+                                             (__mmask8)-1, \
+                                             _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_getmant_pd(W, U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)(__m512d)(W), \
+                                             (__mmask8)(U), \
+                                             _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_getmant_pd(U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
+  ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v8df)_mm512_setzero_pd(), \
+                                             (__mmask8)(U), \
+                                             _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_getmant_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v16sf)_mm512_undefined_ps(), \
+                                            (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v16sf)(__m512)(W), \
+                                            (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v16sf)_mm512_setzero_ps(), \
+                                            (__mmask16)(U), (int)(R)))
 
 #define _mm512_getmant_ps(A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2)|(B)), \
+                                            (__v16sf)_mm512_undefined_ps(), \
+                                            (__mmask16)-1, \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_getmant_ps(W, U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2)|(B)), \
+                                            (__v16sf)(__m512)(W), \
+                                            (__mmask16)(U), \
+                                            _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_maskz_getmant_ps(U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
+  ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+                                            (int)(((C)<<2)|(B)), \
+                                            (__v16sf)_mm512_setzero_ps(), \
+                                            (__mmask16)(U), \
+                                            _MM_FROUND_CUR_DIRECTION))
 
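(In the getmant macros, (int)(((C)<<2) | (B)) packs the two immediates into one byte: the _MM_MANT_NORM_* interval selector B lands in bits 1:0 and the _MM_MANT_SIGN_* control C in bits 3:2. A small sketch of typical usage; the helper name is illustrative:

    #include <immintrin.h>

    /* Normalize each mantissa into [1, 2), keeping the source sign. */
    __attribute__((target("avx512f")))
    __m512d mant_1_2(__m512d x) {
      return _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
    }
)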
 #define _mm512_getexp_round_pd(A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)_mm512_undefined_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_getexp_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_getexp_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_getexp_pd (__m512d __A)
@@ -7572,19 +7572,19 @@ _mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
 }
 
 #define _mm512_getexp_round_ps(A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
+  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)_mm512_undefined_ps(), \
+                                           (__mmask16)-1, (int)(R)))
 
 #define _mm512_mask_getexp_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)(__m512)(W), \
+                                           (__mmask16)(U), (int)(R)))
 
 #define _mm512_maskz_getexp_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
+  ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+                                           (__v16sf)_mm512_setzero_ps(), \
+                                           (__mmask16)(U), (int)(R)))
 
 static __inline__ __m512 __DEFAULT_FN_ATTRS512
 _mm512_getexp_ps (__m512 __A)
@@ -7614,100 +7614,100 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
 }
 
 #define _mm512_i64gather_ps(index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
+  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
+                                        (void const *)(addr), \
+                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                        (int)(scale)))
 
 #define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+  ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
                                         (void const *)(addr), \
                                         (__v8di)(__m512i)(index), \
-                                        (__mmask8)-1, (int)(scale))
+                                        (__mmask8)(mask), (int)(scale)))
+
+#define _mm512_i64gather_epi32(index, addr, scale) \
+  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+                                         (void const *)(addr), \
+                                         (__v8di)(__m512i)(index), \
+                                         (__mmask8)-1, (int)(scale)))
 
 #define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v8di)(__m512i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm512_i64gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
+  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
+                                        (void const *)(addr), \
+                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                        (int)(scale)))
 
 #define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+  ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
+                                        (void const *)(addr), \
+                                        (__v8di)(__m512i)(index), \
+                                        (__mmask8)(mask), (int)(scale)))
 
 #define _mm512_i64gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
+  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
+                                        (void const *)(addr), \
+                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
+                                        (int)(scale)))
 
 #define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+  ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
+                                        (void const *)(addr), \
+                                        (__v8di)(__m512i)(index), \
+                                        (__mmask8)(mask), (int)(scale)))
 
 #define _mm512_i32gather_ps(index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v16si)(__m512)(index), \
-                                       (__mmask16)-1, (int)(scale))
+  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
+                                        (void const *)(addr), \
+                                        (__v16si)(__m512)(index), \
+                                        (__mmask16)-1, (int)(scale)))
 
 #define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v16si)(__m512)(index), \
-                                       (__mmask16)(mask), (int)(scale))
+  ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
+                                        (void const *)(addr), \
+                                        (__v16si)(__m512)(index), \
+                                        (__mmask16)(mask), (int)(scale)))
 
 #define _mm512_i32gather_epi32(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)-1, (int)(scale))
+  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
+                                         (void const *)(addr), \
+                                         (__v16si)(__m512i)(index), \
+                                         (__mmask16)-1, (int)(scale)))
 
 #define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)(mask), (int)(scale))
+  ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v16si)(__m512i)(index), \
+                                         (__mmask16)(mask), (int)(scale)))
 
 #define _mm512_i32gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
+  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
+                                        (void const *)(addr), \
+                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
+                                        (int)(scale)))
 
 #define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+  ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
+                                        (void const *)(addr), \
+                                        (__v8si)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)))
 
 #define _mm512_i32gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
+  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
+                                        (void const *)(addr), \
+                                        (__v8si)(__m256i)(index), (__mmask8)-1, \
+                                        (int)(scale)))
 
 #define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+  ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
+                                        (void const *)(addr), \
+                                        (__v8si)(__m256i)(index), \
+                                        (__mmask8)(mask), (int)(scale)))
 
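(Each gather above loads from addr + index[i] * scale for every lane whose mask bit is set; scale must be a compile-time constant 1, 2, 4, or 8. A rough sketch; the helper name is illustrative:

    #include <immintrin.h>

    /* Gather 16 floats table[idx[i]]; scale 4 == sizeof(float), and the
       unmasked form supplies an all-ones mask internally. */
    __attribute__((target("avx512f")))
    __m512 gather16(const float *table, __m512i idx) {
      return _mm512_i32gather_ps(idx, table, 4);
    }
)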
 #define _mm512_i64scatter_ps(addr, index, v1, scale) \
   __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
@@ -7800,16 +7800,16 @@ _mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_fmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
+                                         (int)(R)))
 
 #define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
+                                         (int)(R)))
 
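(The R argument of the *_round_* forms selects the rounding mode for just this operation: an explicit mode is OR'ed with _MM_FROUND_NO_EXC, while _MM_FROUND_CUR_DIRECTION defers to MXCSR. A sketch; the helper name is illustrative:

    #include <immintrin.h>

    /* Fused a*b+c on the low float with round-to-nearest and suppressed
       exceptions; the upper three lanes pass through from a. */
    __attribute__((target("avx512f")))
    __m128 fma_rn(__m128 a, __m128 b, __m128 c) {
      return _mm_fmadd_round_ss(a, b, c,
                                _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }
)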
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7822,10 +7822,10 @@ _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 }
 
 #define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7838,10 +7838,10 @@ _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+                                          (__v4sf)(__m128)(X), \
+                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7854,16 +7854,16 @@ _mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_fmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), \
+                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
+                                         (int)(R)))
 
 #define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+                                         (__v4sf)(__m128)(A), \
+                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
+                                         (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7876,10 +7876,10 @@ _mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 }
 
 #define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), \
+                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7892,10 +7892,10 @@ _mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+                                          (__v4sf)(__m128)(X), \
+                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7908,16 +7908,16 @@ _mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_fnmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                         -(__v4sf)(__m128)(B), \
+                                         (__v4sf)(__m128)(C), (__mmask8)-1, \
+                                         (int)(R)))
 
 #define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+                                         -(__v4sf)(__m128)(A), \
+                                         (__v4sf)(__m128)(B), (__mmask8)(U), \
+                                         (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7930,10 +7930,10 @@ _mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 }
 
 #define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                          -(__v4sf)(__m128)(B), \
+                                          (__v4sf)(__m128)(C), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7946,10 +7946,10 @@ _mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+                                          -(__v4sf)(__m128)(X), \
+                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7962,16 +7962,16 @@ _mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 }
 
 #define _mm_fnmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                         -(__v4sf)(__m128)(B), \
+                                         -(__v4sf)(__m128)(C), (__mmask8)-1, \
+                                         (int)(R)))
 
 #define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+                                         -(__v4sf)(__m128)(A), \
+                                         -(__v4sf)(__m128)(B), (__mmask8)(U), \
+                                         (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7984,10 +7984,10 @@ _mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 }
 
 #define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+                                          -(__v4sf)(__m128)(B), \
+                                          -(__v4sf)(__m128)(C), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -8000,10 +8000,10 @@ _mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+                                          -(__v4sf)(__m128)(X), \
+                                          (__v4sf)(__m128)(Y), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8016,16 +8016,16 @@ _mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_fmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
+                                          (int)(R)))
 
 #define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8038,10 +8038,10 @@ _mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 }
 
 #define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
+                                           (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8054,10 +8054,10 @@ _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+                                           (__v2df)(__m128d)(X), \
+                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
+                                           (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8070,16 +8070,16 @@ _mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_fmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), \
+                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
+                                          (int)(R)))
 
 #define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+                                          (__v2df)(__m128d)(A), \
+                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8092,10 +8092,10 @@ _mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 }
 
 #define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), \
+                                           -(__v2df)(__m128d)(C), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8108,10 +8108,10 @@ _mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+                                           (__v2df)(__m128d)(X), \
+                                           (__v2df)(__m128d)(Y), \
+                                           (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8124,16 +8124,16 @@ _mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_fnmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                          -(__v2df)(__m128d)(B), \
+                                          (__v2df)(__m128d)(C), (__mmask8)-1, \
+                                          (int)(R)))
 
 #define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+                                          -(__v2df)(__m128d)(A), \
+                                          (__v2df)(__m128d)(B), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8146,10 +8146,10 @@ _mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 }
 
 #define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                           -(__v2df)(__m128d)(B), \
+                                           (__v2df)(__m128d)(C), (__mmask8)(U), \
+                                           (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8162,10 +8162,10 @@ _mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+                                           -(__v2df)(__m128d)(X), \
+                                           (__v2df)(__m128d)(Y), (__mmask8)(U), \
+                                           (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8178,16 +8178,16 @@ _mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
 }
 
 #define _mm_fnmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+                                          -(__v2df)(__m128d)(B), \
+                                          -(__v2df)(__m128d)(C), (__mmask8)-1, \
+                                          (int)(R)))
 
 #define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+                                          -(__v2df)(__m128d)(A), \
+                                          -(__v2df)(__m128d)(B), (__mmask8)(U), \
+                                          (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8200,11 +8200,11 @@ _mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 }
 
 #define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), \
-                                          (int)(R))
+  ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+                                           -(__v2df)(__m128d)(B), \
+                                           -(__v2df)(__m128d)(C), \
+                                           (__mmask8)(U), \
+                                           (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8217,36 +8217,36 @@ _mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
 }
 
 #define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+                                           -(__v2df)(__m128d)(X), \
+                                           (__v2df)(__m128d)(Y), \
+                                           (__mmask8)(U), (int)(R)))
 
 #define _mm512_permutex_pd(X, C) \
-  (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
+  ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)))
 
 #define _mm512_mask_permutex_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_permutex_pd((X), (C)), \
+                                        (__v8df)(__m512d)(W)))
 
 #define _mm512_maskz_permutex_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
+  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                        (__v8df)_mm512_permutex_pd((X), (C)), \
+                                        (__v8df)_mm512_setzero_pd()))
 
 #define _mm512_permutex_epi64(X, C) \
-  (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
+  ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)))
 
 #define _mm512_mask_permutex_epi64(W, U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
+                                       (__v8di)(__m512i)(W)))
 
 #define _mm512_maskz_permutex_epi64(U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                       (__v8di)_mm512_permutex_epi64((X), (C)), \
+                                       (__v8di)_mm512_setzero_si512()))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_permutexvar_pd (__m512i __X, __m512d __Y)
@@ -8416,10 +8416,10 @@ _mm512_kxor (__mmask16 __A, __mmask16 __B)
 #define _kxor_mask16 _mm512_kxor
 
 #define _kshiftli_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
+  ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)))
 
 #define _kshiftri_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
+  ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)))
 
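(The kshift forms shift an entire predicate register as if it were an integer, zero-filling the vacated bits. For instance, as a hypothetical helper:

    #include <immintrin.h>

    /* Move each lane's bit up by one, discarding bit 15: 0x8001 -> 0x0002. */
    __attribute__((target("avx512f")))
    __mmask16 shift_up(__mmask16 m) {
      return _kshiftli_mask16(m, 1);
    }
)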
 static __inline__ unsigned int __DEFAULT_FN_ATTRS
 _cvtmask16_u32(__mmask16 __A) {
@@ -8538,48 +8538,48 @@ _mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
 }
 
 #define _mm_cmp_round_ss_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
+  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                       (__v4sf)(__m128)(Y), (int)(P), \
+                                       (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
+  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                       (__v4sf)(__m128)(Y), (int)(P), \
+                                       (__mmask8)(M), (int)(R)))
 
 #define _mm_cmp_ss_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
+  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                       (__v4sf)(__m128)(Y), (int)(P), \
+                                       (__mmask8)-1, \
+                                       _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_cmp_ss_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
+  ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+                                       (__v4sf)(__m128)(Y), (int)(P), \
+                                       (__mmask8)(M), \
+                                       _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_cmp_round_sd_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
+  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                       (__v2df)(__m128d)(Y), (int)(P), \
+                                       (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
+  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                       (__v2df)(__m128d)(Y), (int)(P), \
+                                       (__mmask8)(M), (int)(R)))
 
 #define _mm_cmp_sd_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
+  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                       (__v2df)(__m128d)(Y), (int)(P), \
+                                       (__mmask8)-1, \
+                                       _MM_FROUND_CUR_DIRECTION))
 
 #define _mm_mask_cmp_sd_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
+  ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+                                       (__v2df)(__m128d)(Y), (int)(P), \
+                                       (__mmask8)(M), \
+                                       _MM_FROUND_CUR_DIRECTION))
 
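(These scalar compares return an __mmask8 whose bit 0 holds the verdict for the low element, with the remaining bits zero; P takes the usual _CMP_* predicates. A sketch; the helper name is illustrative:

    #include <immintrin.h>

    /* Ordered less-than on the low doubles of x and y. */
    __attribute__((target("avx512f")))
    int lt_low(__m128d x, __m128d y) {
      return _mm_cmp_sd_mask(x, y, _CMP_LT_OS) & 1;
    }
)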
 /* Bit Test */
 
@@ -8760,17 +8760,17 @@ _mm_maskz_load_sd (__mmask8 __U, const double* __A)
 }
 
 #define _mm512_shuffle_epi32(A, I) \
-  (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
+  ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)))
 
 #define _mm512_mask_shuffle_epi32(W, U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)(__m512i)(W))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
+                                       (__v16si)(__m512i)(W)))
 
 #define _mm512_maskz_shuffle_epi32(U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                       (__v16si)_mm512_shuffle_epi32((A), (I)), \
+                                       (__v16si)_mm512_setzero_si512()))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
@@ -8901,19 +8901,19 @@ _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
 }
 
 #define _mm512_cvt_roundps_pd(A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
+  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                            (__v8df)_mm512_undefined_pd(), \
+                                            (__mmask8)-1, (int)(R)))
 
 #define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                            (__v8df)(__m512d)(W), \
+                                            (__mmask8)(U), (int)(R)))
 
 #define _mm512_maskz_cvt_roundps_pd(U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
+  ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+                                            (__v8df)_mm512_setzero_pd(), \
+                                            (__mmask8)(U), (int)(R)))
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS512
 _mm512_cvtps_pd (__m256 __A)
@@ -9010,22 +9010,22 @@ _mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
 }
 
 #define _mm_cvt_roundsd_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_undefined_ps(), \
-                                             (__mmask8)-1, (int)(R))
+  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v4sf)_mm_undefined_ps(), \
+                                              (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v4sf)(__m128)(W), \
+                                              (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), (int)(R))
+  ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
@@ -9058,47 +9058,47 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
 
 #ifdef __x86_64__
 #define _mm_cvt_roundi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
+  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+                                      (int)(R)))
 
 #define _mm_cvt_roundsi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
+  ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+                                      (int)(R)))
 #endif
 
 #define _mm_cvt_roundsi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
 
 #define _mm_cvt_roundi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+  ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
 
 #ifdef __x86_64__
 #define _mm_cvt_roundsi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
+  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+                                     (int)(R)))
 
 #define _mm_cvt_roundi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
+  ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+                                     (int)(R)))
 #endif
 
 #define _mm_cvt_roundss_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_undefined_pd(), \
-                                              (__mmask8)-1, (int)(R))
+  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v2df)_mm_undefined_pd(), \
+                                               (__mmask8)-1, (int)(R)))
 
 #define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v2df)(__m128d)(W), \
+                                               (__mmask8)(U), (int)(R)))
 
 #define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
+  ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+                                               (__v4sf)(__m128)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(U), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
@@ -9127,8 +9127,8 @@ _mm_cvtu32_sd (__m128d __A, unsigned __B)
 
 #ifdef __x86_64__
 #define _mm_cvt_roundu64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
-                                      (unsigned long long)(B), (int)(R))
+  ((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
+                                       (unsigned long long)(B), (int)(R)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
@@ -9139,8 +9139,8 @@ _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
 #endif
 
 #define _mm_cvt_roundu32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
-                                     (int)(R))
+  ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
+                                      (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_cvtu32_ss (__m128 __A, unsigned __B)
@@ -9151,8 +9151,8 @@ _mm_cvtu32_ss (__m128 __A, unsigned __B)
 
 #ifdef __x86_64__
 #define _mm_cvt_roundu64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
-                                     (unsigned long long)(B), (int)(R))
+  ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
+                                      (unsigned long long)(B), (int)(R)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_cvtu64_ss (__m128 __A, unsigned long long __B)

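(For reviewers: the outer parentheses guard against C precedence, since postfix operators bind tighter than a cast. A minimal sketch of the hazard follows, using an illustrative helper name first_lane; this is not necessarily the exact PR51324 reproducer, and it assumes clang with -mavx512f.)

  #include <immintrin.h>

  /* With the old expansion, a trailing subscript attached to the
     builtin call before the cast:
         (__m512i)__builtin_ia32_pshufd512(...)[0]
     so the cast was applied to a scalar element, which clang
     rejects with a confusing diagnostic.  With the new outer
     parentheses, [0] indexes the finished __m512i as intended. */
  long long first_lane(__m512i v) {
    return _mm512_shuffle_epi32(v, 0x1B)[0];
  }
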
diff --git a/clang/lib/Headers/avx512vbmi2intrin.h b/clang/lib/Headers/avx512vbmi2intrin.h
index a23144616ce3..17fa77722c64 100644
--- a/clang/lib/Headers/avx512vbmi2intrin.h
+++ b/clang/lib/Headers/avx512vbmi2intrin.h
@@ -129,88 +129,88 @@ _mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
 }
 
 #define _mm512_shldi_epi64(A, B, I) \
-  (__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \
+                                      (__v8di)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shldi_epi64(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                    (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
-                                    (__v8di)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                     (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+                                     (__v8di)(__m512i)(S)))
 
 #define _mm512_maskz_shldi_epi64(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                    (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
-                                    (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                     (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+                                     (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_shldi_epi32(A, B, I) \
-  (__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \
+                                      (__v16si)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shldi_epi32(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                   (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
-                                   (__v16si)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                    (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+                                    (__v16si)(__m512i)(S)))
 
 #define _mm512_maskz_shldi_epi32(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                   (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
-                                   (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                    (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+                                    (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_shldi_epi16(A, B, I) \
-  (__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \
-                                     (__v32hi)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \
+                                      (__v32hi)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shldi_epi16(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                   (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
-                                   (__v32hi)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                    (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+                                    (__v32hi)(__m512i)(S)))
 
 #define _mm512_maskz_shldi_epi16(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                   (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
-                                   (__v32hi)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                    (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+                                    (__v32hi)_mm512_setzero_si512()))
 
 #define _mm512_shrdi_epi64(A, B, I) \
-  (__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \
+                                      (__v8di)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shrdi_epi64(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                    (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
-                                    (__v8di)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                     (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+                                     (__v8di)(__m512i)(S)))
 
 #define _mm512_maskz_shrdi_epi64(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                    (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
-                                    (__v8di)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+                                     (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+                                     (__v8di)_mm512_setzero_si512()))
 
 #define _mm512_shrdi_epi32(A, B, I) \
-  (__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \
+                                      (__v16si)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shrdi_epi32(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                   (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
-                                   (__v16si)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                    (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+                                    (__v16si)(__m512i)(S)))
 
 #define _mm512_maskz_shrdi_epi32(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                   (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
-                                   (__v16si)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+                                    (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+                                    (__v16si)_mm512_setzero_si512()))
 
 #define _mm512_shrdi_epi16(A, B, I) \
-  (__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \
-                                     (__v32hi)(__m512i)(B), (int)(I))
+  ((__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \
+                                      (__v32hi)(__m512i)(B), (int)(I)))
 
 #define _mm512_mask_shrdi_epi16(S, U, A, B, I) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                   (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
-                                   (__v32hi)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                    (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+                                    (__v32hi)(__m512i)(S)))
 
 #define _mm512_maskz_shrdi_epi16(U, A, B, I) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                   (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
-                                   (__v32hi)_mm512_setzero_si512())
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                    (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+                                    (__v32hi)_mm512_setzero_si512()))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C)

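(The fix also matters under sizeof, whose grammar reads a leading parenthesized type name as sizeof(type). An illustrative sketch, with a hypothetical helper width_of_shld; assumes clang with -mavx512vbmi2.)

  #include <immintrin.h>

  /* Before this patch, sizeof _mm512_shldi_epi64(a, b, 3) expanded
     to sizeof (__m512i)__builtin_ia32_vpshldq512(...), which parses
     as sizeof(__m512i) followed by a stray call expression, i.e. a
     syntax error.  After the patch the operand is one parenthesized
     expression, so sizeof measures the whole 64-byte result. */
  unsigned width_of_shld(__m512i a, __m512i b) {
    return (unsigned)sizeof _mm512_shldi_epi64(a, b, 3);
  }
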
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 6ed10ed9803b..7873516053ec 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -21,84 +21,84 @@
 /* Integer compare */
 
 #define _mm_cmp_epi8_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                         (__v16qi)(__m128i)(b), (int)(p), \
-                                         (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)-1))
 
 #define _mm_mask_cmp_epi8_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
-                                         (__v16qi)(__m128i)(b), (int)(p), \
-                                         (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)(m)))
 
 #define _mm_cmp_epu8_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), (int)(p), \
-                                          (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+                                           (__v16qi)(__m128i)(b), (int)(p), \
+                                           (__mmask16)-1))
 
 #define _mm_mask_cmp_epu8_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
-                                          (__v16qi)(__m128i)(b), (int)(p), \
-                                          (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+                                           (__v16qi)(__m128i)(b), (int)(p), \
+                                           (__mmask16)(m)))
 
 #define _mm256_cmp_epi8_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                         (__v32qi)(__m256i)(b), (int)(p), \
-                                         (__mmask32)-1)
+  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)-1))
 
 #define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
-                                         (__v32qi)(__m256i)(b), (int)(p), \
-                                         (__mmask32)(m))
+  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)(m)))
 
 #define _mm256_cmp_epu8_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), (int)(p), \
-                                          (__mmask32)-1)
+  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+                                           (__v32qi)(__m256i)(b), (int)(p), \
+                                           (__mmask32)-1))
 
 #define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
-                                          (__v32qi)(__m256i)(b), (int)(p), \
-                                          (__mmask32)(m))
+  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+                                           (__v32qi)(__m256i)(b), (int)(p), \
+                                           (__mmask32)(m)))
 
 #define _mm_cmp_epi16_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                        (__v8hi)(__m128i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm_mask_cmp_epi16_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
-                                        (__v8hi)(__m128i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+                                         (__v8hi)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm_cmp_epu16_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+                                          (__v8hi)(__m128i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_cmp_epu16_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
-                                         (__v8hi)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+                                          (__v8hi)(__m128i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm256_cmp_epi16_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                         (__v16hi)(__m256i)(b), (int)(p), \
-                                         (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+                                          (__v16hi)(__m256i)(b), (int)(p), \
+                                          (__mmask16)-1))
 
 #define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
-                                         (__v16hi)(__m256i)(b), (int)(p), \
-                                         (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+                                          (__v16hi)(__m256i)(b), (int)(p), \
+                                          (__mmask16)(m)))
 
 #define _mm256_cmp_epu16_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), (int)(p), \
-                                          (__mmask16)-1)
+  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+                                           (__v16hi)(__m256i)(b), (int)(p), \
+                                           (__mmask16)-1))
 
 #define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
-                                          (__v16hi)(__m256i)(b), (int)(p), \
-                                          (__mmask16)(m))
+  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+                                           (__v16hi)(__m256i)(b), (int)(p), \
+                                           (__mmask16)(m)))
 
 #define _mm_cmpeq_epi8_mask(A, B) \
     _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
@@ -1821,46 +1821,46 @@ _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
 
 
 #define _mm_mask_shufflehi_epi16(W, U, A, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
-                                      (__v8hi)(__m128i)(W))
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+                                       (__v8hi)(__m128i)(W)))
 
 #define _mm_maskz_shufflehi_epi16(U, A, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
-                                      (__v8hi)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+                                       (__v8hi)_mm_setzero_si128()))
 
 #define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                      (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
-                                      (__v16hi)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+                                       (__v16hi)(__m256i)(W)))
 
 #define _mm256_maskz_shufflehi_epi16(U, A, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                      (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
-                                      (__v16hi)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                       (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+                                       (__v16hi)_mm256_setzero_si256()))
 
 #define _mm_mask_shufflelo_epi16(W, U, A, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
-                                      (__v8hi)(__m128i)(W))
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+                                       (__v8hi)(__m128i)(W)))
 
 #define _mm_maskz_shufflelo_epi16(U, A, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
-                                      (__v8hi)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+                                       (__v8hi)_mm_setzero_si128()))
 
 #define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                      (__v16hi)_mm256_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v16hi)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                       (__v16hi)_mm256_shufflelo_epi16((A), \
+                                                                       (imm)), \
+                                       (__v16hi)(__m256i)(W)))
 
 #define _mm256_maskz_shufflelo_epi16(U, A, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                      (__v16hi)_mm256_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v16hi)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                       (__v16hi)_mm256_shufflelo_epi16((A), \
+                                                                       (imm)), \
+                                       (__v16hi)_mm256_setzero_si256()))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_sllv_epi16(__m256i __A, __m256i __B)
@@ -2756,52 +2756,52 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
 }
 
 #define _mm_mask_alignr_epi8(W, U, A, B, N) \
-  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
                                  (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
-                                 (__v16qi)(__m128i)(W))
+                                 (__v16qi)(__m128i)(W)))
 
 #define _mm_maskz_alignr_epi8(U, A, B, N) \
-  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
                                  (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
-                                 (__v16qi)_mm_setzero_si128())
+                                 (__v16qi)_mm_setzero_si128()))
 
 #define _mm256_mask_alignr_epi8(W, U, A, B, N) \
-  (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
                               (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
-                              (__v32qi)(__m256i)(W))
+                              (__v32qi)(__m256i)(W)))
 
 #define _mm256_maskz_alignr_epi8(U, A, B, N) \
-  (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
                               (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
-                              (__v32qi)_mm256_setzero_si256())
+                              (__v32qi)_mm256_setzero_si256()))
 
 #define _mm_dbsad_epu8(A, B, imm) \
-  (__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
-                                      (__v16qi)(__m128i)(B), (int)(imm))
+  ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
+                                       (__v16qi)(__m128i)(B), (int)(imm)))
 
 #define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
-                                      (__v8hi)(__m128i)(W))
+                                      (__v8hi)(__m128i)(W)))
 
 #define _mm_maskz_dbsad_epu8(U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
-                                      (__v8hi)_mm_setzero_si128())
+                                      (__v8hi)_mm_setzero_si128()))
 
 #define _mm256_dbsad_epu8(A, B, imm) \
-  (__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
-                                      (__v32qi)(__m256i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
+                                       (__v32qi)(__m256i)(B), (int)(imm)))
 
 #define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                   (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
-                                  (__v16hi)(__m256i)(W))
+                                  (__v16hi)(__m256i)(W)))
 
 #define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                   (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
-                                  (__v16hi)_mm256_setzero_si256())
+                                  (__v16hi)_mm256_setzero_si256()))
 
 #undef __DEFAULT_FN_ATTRS128
 #undef __DEFAULT_FN_ATTRS256

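(Because the change is meant to be purely syntactic, each macro's result type must be unchanged. A cheap compile-time spot check, illustrative only and not part of this patch's test suite; SAME_TYPE and check_types are made-up names; assumes clang with -mavx512bw -mavx512vl.)

  #include <immintrin.h>

  /* __typeof__ is unevaluated, so this costs nothing at runtime;
     the build fails if an added parenthesis ever changed a macro's
     result type. */
  #define SAME_TYPE(expr, T) \
    _Static_assert(__builtin_types_compatible_p(__typeof__(expr), T), \
                   #expr " should yield " #T)

  void check_types(__m128i a, __m128i b, __mmask8 u, __m128i w) {
    SAME_TYPE(_mm_dbsad_epu8(a, b, 0), __m128i);
    SAME_TYPE(_mm_mask_dbsad_epu8(w, u, a, b, 0), __m128i);
  }
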
diff --git a/clang/lib/Headers/avx512vldqintrin.h b/clang/lib/Headers/avx512vldqintrin.h
index 95ba574ea821..713e1a18a1b3 100644
--- a/clang/lib/Headers/avx512vldqintrin.h
+++ b/clang/lib/Headers/avx512vldqintrin.h
@@ -773,134 +773,134 @@ _mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
 }
 
 #define _mm_range_pd(A, B, C) \
-  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (int)(C), \
-                                          (__v2df)_mm_setzero_pd(), \
-                                          (__mmask8)-1)
+  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), (int)(C), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)-1))
 
 #define _mm_mask_range_pd(W, U, A, B, C) \
-  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (int)(C), \
-                                          (__v2df)(__m128d)(W), \
-                                          (__mmask8)(U))
+  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), (int)(C), \
+                                           (__v2df)(__m128d)(W), \
+                                           (__mmask8)(U)))
 
 #define _mm_maskz_range_pd(U, A, B, C) \
-  (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), (int)(C), \
-                                          (__v2df)_mm_setzero_pd(), \
-                                          (__mmask8)(U))
+  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+                                           (__v2df)(__m128d)(B), (int)(C), \
+                                           (__v2df)_mm_setzero_pd(), \
+                                           (__mmask8)(U)))
 
 #define _mm256_range_pd(A, B, C) \
-  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
-                                          (__v4df)(__m256d)(B), (int)(C), \
-                                          (__v4df)_mm256_setzero_pd(), \
-                                          (__mmask8)-1)
+  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(C), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)-1))
 
 #define _mm256_mask_range_pd(W, U, A, B, C) \
-  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
-                                          (__v4df)(__m256d)(B), (int)(C), \
-                                          (__v4df)(__m256d)(W), \
-                                          (__mmask8)(U))
+  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(C), \
+                                           (__v4df)(__m256d)(W), \
+                                           (__mmask8)(U)))
 
 #define _mm256_maskz_range_pd(U, A, B, C) \
-  (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
-                                          (__v4df)(__m256d)(B), (int)(C), \
-                                          (__v4df)_mm256_setzero_pd(), \
-                                          (__mmask8)(U))
+  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+                                           (__v4df)(__m256d)(B), (int)(C), \
+                                           (__v4df)_mm256_setzero_pd(), \
+                                           (__mmask8)(U)))
 
 #define _mm_range_ps(A, B, C) \
-  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (int)(C), \
-                                         (__v4sf)_mm_setzero_ps(), \
-                                         (__mmask8)-1)
+  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), (int)(C), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_range_ps(W, U, A, B, C) \
-  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (int)(C), \
-                                         (__v4sf)(__m128)(W), (__mmask8)(U))
+  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), (int)(C), \
+                                          (__v4sf)(__m128)(W), (__mmask8)(U)))
 
 #define _mm_maskz_range_ps(U, A, B, C) \
-  (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), (int)(C), \
-                                         (__v4sf)_mm_setzero_ps(), \
-                                         (__mmask8)(U))
+  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+                                          (__v4sf)(__m128)(B), (int)(C), \
+                                          (__v4sf)_mm_setzero_ps(), \
+                                          (__mmask8)(U)))
 
 #define _mm256_range_ps(A, B, C) \
-  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
-                                         (__v8sf)(__m256)(B), (int)(C), \
-                                         (__v8sf)_mm256_setzero_ps(), \
-                                         (__mmask8)-1)
+  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                          (__v8sf)(__m256)(B), (int)(C), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)-1))
 
 #define _mm256_mask_range_ps(W, U, A, B, C) \
-  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
-                                         (__v8sf)(__m256)(B), (int)(C), \
-                                         (__v8sf)(__m256)(W), (__mmask8)(U))
+  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                          (__v8sf)(__m256)(B), (int)(C), \
+                                          (__v8sf)(__m256)(W), (__mmask8)(U)))
 
 #define _mm256_maskz_range_ps(U, A, B, C) \
-  (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
-                                         (__v8sf)(__m256)(B), (int)(C), \
-                                         (__v8sf)_mm256_setzero_ps(), \
-                                         (__mmask8)(U))
+  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+                                          (__v8sf)(__m256)(B), (int)(C), \
+                                          (__v8sf)_mm256_setzero_ps(), \
+                                          (__mmask8)(U)))
 
 #define _mm_reduce_pd(A, B) \
-  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1)
+  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)-1))
 
 #define _mm_mask_reduce_pd(W, U, A, B) \
-  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U))
+  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                            (__v2df)(__m128d)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm_maskz_reduce_pd(U, A, B) \
-  (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U))
+  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+                                            (__v2df)_mm_setzero_pd(), \
+                                            (__mmask8)(U)))
 
 #define _mm256_reduce_pd(A, B) \
-  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
-                                           (__v4df)_mm256_setzero_pd(), \
-                                           (__mmask8)-1)
+  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)-1))
 
 #define _mm256_mask_reduce_pd(W, U, A, B) \
-  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
-                                           (__v4df)(__m256d)(W), \
-                                           (__mmask8)(U))
+  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                            (__v4df)(__m256d)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm256_maskz_reduce_pd(U, A, B) \
-  (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
-                                           (__v4df)_mm256_setzero_pd(), \
-                                           (__mmask8)(U))
+  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+                                            (__v4df)_mm256_setzero_pd(), \
+                                            (__mmask8)(U)))
 
 #define _mm_reduce_ps(A, B) \
-  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1)
+  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)-1))
 
 #define _mm_mask_reduce_ps(W, U, A, B) \
-  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
-                                          (__v4sf)(__m128)(W), \
-                                          (__mmask8)(U))
+  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                           (__v4sf)(__m128)(W), \
+                                           (__mmask8)(U)))
 
 #define _mm_maskz_reduce_ps(U, A, B) \
-  (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U))
+  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+                                           (__v4sf)_mm_setzero_ps(), \
+                                           (__mmask8)(U)))
 
 #define _mm256_reduce_ps(A, B) \
-  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1)
+  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)-1))
 
 #define _mm256_mask_reduce_ps(W, U, A, B) \
-  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
-                                          (__v8sf)(__m256)(W), \
-                                          (__mmask8)(U))
+  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                           (__v8sf)(__m256)(W), \
+                                           (__mmask8)(U)))
 
 #define _mm256_maskz_reduce_ps(U, A, B) \
-  (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)(U))
+  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+                                           (__v8sf)_mm256_setzero_ps(), \
+                                           (__mmask8)(U)))
 
 static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
 _mm_movepi32_mask (__m128i __A)
@@ -1066,100 +1066,100 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
 }
 
 #define _mm256_extractf64x2_pd(A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)_mm_undefined_pd(), \
-                                                (__mmask8)-1)
+  ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)_mm_undefined_pd(), \
+                                                 (__mmask8)-1))
 
 #define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U))
+  ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)(__m128d)(W), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_maskz_extractf64x2_pd(U, A, imm) \
-  (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
-                                                (int)(imm), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U))
+  ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+                                                 (int)(imm), \
+                                                 (__v2df)_mm_setzero_pd(), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_extracti64x2_epi64(A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+  ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v2di)_mm_undefined_si128(), \
-                                                (__mmask8)-1)
+                                                (__mmask8)-1))
 
 #define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
-                                                (int)(imm), \
-                                                (__v2di)(__m128i)(W), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                 (int)(imm), \
+                                                 (__v2di)(__m128i)(W), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_maskz_extracti64x2_epi64(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
-                                                (int)(imm), \
-                                                (__v2di)_mm_setzero_si128(), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+                                                 (int)(imm), \
+                                                 (__v2di)_mm_setzero_si128(), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_insertf64x2(A, B, imm) \
-  (__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
-                                          (__v2df)(__m128d)(B), (int)(imm))
+  ((__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
+                                           (__v2df)(__m128d)(B), (int)(imm)))
 
 #define _mm256_mask_insertf64x2(W, U, A, B, imm) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                   (__v4df)_mm256_insertf64x2((A), (B), (imm)), \
-                                  (__v4df)(__m256d)(W))
+                                  (__v4df)(__m256d)(W)))
 
 #define _mm256_maskz_insertf64x2(U, A, B, imm) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                   (__v4df)_mm256_insertf64x2((A), (B), (imm)), \
-                                  (__v4df)_mm256_setzero_pd())
+                                  (__v4df)_mm256_setzero_pd()))
 
 #define _mm256_inserti64x2(A, B, imm) \
-  (__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
-                                          (__v2di)(__m128i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
+                                           (__v2di)(__m128i)(B), (int)(imm)))
 
 #define _mm256_mask_inserti64x2(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                  (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
-                                  (__v4di)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                   (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+                                   (__v4di)(__m256i)(W)))
 
 #define _mm256_maskz_inserti64x2(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                  (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
-                                  (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                   (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+                                   (__v4di)_mm256_setzero_si256()))
 
 #define _mm_mask_fpclass_pd_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm_fpclass_pd_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm256_mask_fpclass_pd_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_fpclass_pd_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm_mask_fpclass_ps_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm_fpclass_ps_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm256_mask_fpclass_ps_mask(U, A, imm) \
-  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_fpclass_ps_mask(A, imm) \
-  (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                              (__mmask8)-1))
 
 #undef __DEFAULT_FN_ATTRS128
 #undef __DEFAULT_FN_ATTRS256

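(Generated code should be bit-for-bit identical, since only parentheses were added around otherwise unchanged expansions. A runtime smoke test sketch for the compare-mask macros below, with made-up scaffolding; assumes an AVX512VL-capable host and clang with -mavx512f -mavx512vl.)

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128i a = _mm_setr_epi32(1, 5, 3, 7);
    __m128i b = _mm_set1_epi32(4);
    /* Lanes 0 and 2 satisfy a < b, so the mask is 0b0101. */
    __mmask8 lt = _mm_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
    printf("lt mask = 0x%x\n", (unsigned)lt); /* prints 0x5 */
    return 0;
  }
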
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 968c10efeac0..0519dba59081 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -771,124 +771,124 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
 }
 
 #define _mm_cmp_epi32_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                        (__v4si)(__m128i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+                                         (__v4si)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm_mask_cmp_epi32_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
-                                        (__v4si)(__m128i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+                                         (__v4si)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm_cmp_epu32_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+                                          (__v4si)(__m128i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_cmp_epu32_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
-                                         (__v4si)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+                                          (__v4si)(__m128i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm256_cmp_epi32_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                        (__v8si)(__m256i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+                                         (__v8si)(__m256i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
-                                        (__v8si)(__m256i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+                                         (__v8si)(__m256i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm256_cmp_epu32_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+                                          (__v8si)(__m256i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
-                                         (__v8si)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+                                          (__v8si)(__m256i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm_cmp_epi64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                        (__v2di)(__m128i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+                                         (__v2di)(__m128i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm_mask_cmp_epi64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
-                                        (__v2di)(__m128i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+                                         (__v2di)(__m128i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm_cmp_epu64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+                                          (__v2di)(__m128i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_cmp_epu64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
-                                         (__v2di)(__m128i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+                                          (__v2di)(__m128i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm256_cmp_epi64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                        (__v4di)(__m256i)(b), (int)(p), \
-                                        (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+                                         (__v4di)(__m256i)(b), (int)(p), \
+                                         (__mmask8)-1))
 
 #define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
-                                        (__v4di)(__m256i)(b), (int)(p), \
-                                        (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+                                         (__v4di)(__m256i)(b), (int)(p), \
+                                         (__mmask8)(m)))
 
 #define _mm256_cmp_epu64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+                                          (__v4di)(__m256i)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
-                                         (__v4di)(__m256i)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+                                          (__v4di)(__m256i)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm256_cmp_ps_mask(a, b, p)  \
-  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                         (__v8sf)(__m256)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+                                          (__v8sf)(__m256)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm256_mask_cmp_ps_mask(m, a, b, p)  \
-  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
-                                         (__v8sf)(__m256)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+                                          (__v8sf)(__m256)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm256_cmp_pd_mask(a, b, p)  \
-  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                         (__v4df)(__m256d)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+                                          (__v4df)(__m256d)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm256_mask_cmp_pd_mask(m, a, b, p)  \
-  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
-                                         (__v4df)(__m256d)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+                                          (__v4df)(__m256d)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm_cmp_ps_mask(a, b, p)  \
-  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                         (__v4sf)(__m128)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+                                          (__v4sf)(__m128)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_cmp_ps_mask(m, a, b, p)  \
-  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
-                                         (__v4sf)(__m128)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+                                          (__v4sf)(__m128)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 #define _mm_cmp_pd_mask(a, b, p)  \
-  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                         (__v2df)(__m128d)(b), (int)(p), \
-                                         (__mmask8)-1)
+  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+                                          (__v2df)(__m128d)(b), (int)(p), \
+                                          (__mmask8)-1))
 
 #define _mm_mask_cmp_pd_mask(m, a, b, p)  \
-  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
-                                         (__v2df)(__m128d)(b), (int)(p), \
-                                         (__mmask8)(m))
+  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+                                          (__v2df)(__m128d)(b), (int)(p), \
+                                          (__mmask8)(m)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
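
All of these compare macros yield plain integer masks, so once each expansion
is a single parenthesized expression the results compose with ordinary bitwise
operators exactly as you would expect. A hedged usage sketch (the helper name
is illustrative; _MM_CMPINT_* are the integer-compare predicates from
avx512fintrin.h; compile with -mavx512vl):

    #include <immintrin.h>

    /* Mask of lanes with 0 <= a[i] < b[i] (signed compares). */
    __mmask8 in_range(__m128i a, __m128i b) {
      __mmask8 ge0 = _mm_cmp_epi32_mask(a, _mm_setzero_si128(), _MM_CMPINT_NLT);
      __mmask8 ltb = _mm_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
      return ge0 & ltb;
    }
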
@@ -3289,78 +3289,78 @@ _mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
 }
 
 #define _mm_roundscale_pd(A, imm) \
-  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                              (int)(imm), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)-1)
+  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                               (int)(imm), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1))
 
 
 #define _mm_mask_roundscale_pd(W, U, A, imm) \
-  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                              (int)(imm), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U))
+  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                               (int)(imm), \
+                                               (__v2df)(__m128d)(W), \
+                                               (__mmask8)(U)))
 
 
 #define _mm_maskz_roundscale_pd(U, A, imm) \
-  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
-                                              (int)(imm), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U))
+  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+                                               (int)(imm), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(U)))
 
 
 #define _mm256_roundscale_pd(A, imm) \
-  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                              (int)(imm), \
-                                              (__v4df)_mm256_setzero_pd(), \
-                                              (__mmask8)-1)
+  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)-1))
 
 
 #define _mm256_mask_roundscale_pd(W, U, A, imm) \
-  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                              (int)(imm), \
-                                              (__v4df)(__m256d)(W), \
-                                              (__mmask8)(U))
+  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                               (int)(imm), \
+                                               (__v4df)(__m256d)(W), \
+                                               (__mmask8)(U)))
 
 
 #define _mm256_maskz_roundscale_pd(U, A, imm)  \
-  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
-                                              (int)(imm), \
-                                              (__v4df)_mm256_setzero_pd(), \
-                                              (__mmask8)(U))
+  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+                                               (int)(imm), \
+                                               (__v4df)_mm256_setzero_pd(), \
+                                               (__mmask8)(U)))
 
 #define _mm_roundscale_ps(A, imm)  \
-  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)-1)
+  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1))
 
 
 #define _mm_mask_roundscale_ps(W, U, A, imm)  \
-  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U))
+  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                              (__v4sf)(__m128)(W), \
+                                              (__mmask8)(U)))
 
 
 #define _mm_maskz_roundscale_ps(U, A, imm)  \
-  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U))
+  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(U)))
 
 #define _mm256_roundscale_ps(A, imm)  \
-  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                             (__v8sf)_mm256_setzero_ps(), \
-                                             (__mmask8)-1)
+  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                              (__v8sf)_mm256_setzero_ps(), \
+                                              (__mmask8)-1))
 
 #define _mm256_mask_roundscale_ps(W, U, A, imm)  \
-  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                             (__v8sf)(__m256)(W), \
-                                             (__mmask8)(U))
+  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                              (__v8sf)(__m256)(W), \
+                                              (__mmask8)(U)))
 
 
 #define _mm256_maskz_roundscale_ps(U, A, imm)  \
-  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
-                                             (__v8sf)_mm256_setzero_ps(), \
-                                             (__mmask8)(U))
+  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+                                              (__v8sf)_mm256_setzero_ps(), \
+                                              (__mmask8)(U)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_scalef_pd (__m128d __A, __m128d __B) {
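
The roundscale family returns vectors rather than masks, and vector-returning
macros are where missing outer parentheses bite hardest: a postfix operator
such as [] binds tighter than a cast, so subscripting the old expansion
subscripted the builtin's result first and then tried to cast a lone double
back to __m128d, which is rejected. With the new expansion the subscript
applies to the finished vector. Illustrative sketch (imm 0 means round to
nearest with no scaling; vector subscripting is a clang/gcc extension):

    #include <immintrin.h>

    double first_lane(__m128d v) {
      return _mm_roundscale_pd(v, 0)[0];  /* element 0 of the rounded vector */
    }
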
@@ -4298,56 +4298,56 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
 
 
 #define _mm_rol_epi32(a, b) \
-  (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))
+  ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))
 
 #define _mm_mask_rol_epi32(w, u, a, b) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                      (__v4si)_mm_rol_epi32((a), (b)), \
-                                      (__v4si)(__m128i)(w))
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+                                       (__v4si)_mm_rol_epi32((a), (b)), \
+                                       (__v4si)(__m128i)(w)))
 
 #define _mm_maskz_rol_epi32(u, a, b) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                      (__v4si)_mm_rol_epi32((a), (b)), \
-                                      (__v4si)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+                                       (__v4si)_mm_rol_epi32((a), (b)), \
+                                       (__v4si)_mm_setzero_si128()))
 
 #define _mm256_rol_epi32(a, b) \
-  (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))
+  ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))
 
 #define _mm256_mask_rol_epi32(w, u, a, b) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                      (__v8si)_mm256_rol_epi32((a), (b)), \
-                                      (__v8si)(__m256i)(w))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+                                       (__v8si)_mm256_rol_epi32((a), (b)), \
+                                       (__v8si)(__m256i)(w)))
 
 #define _mm256_maskz_rol_epi32(u, a, b) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                      (__v8si)_mm256_rol_epi32((a), (b)), \
-                                      (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+                                       (__v8si)_mm256_rol_epi32((a), (b)), \
+                                       (__v8si)_mm256_setzero_si256()))
 
 #define _mm_rol_epi64(a, b) \
-  (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))
+  ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))
 
 #define _mm_mask_rol_epi64(w, u, a, b) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                      (__v2di)_mm_rol_epi64((a), (b)), \
-                                      (__v2di)(__m128i)(w))
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+                                       (__v2di)_mm_rol_epi64((a), (b)), \
+                                       (__v2di)(__m128i)(w)))
 
 #define _mm_maskz_rol_epi64(u, a, b) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                      (__v2di)_mm_rol_epi64((a), (b)), \
-                                      (__v2di)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+                                       (__v2di)_mm_rol_epi64((a), (b)), \
+                                       (__v2di)_mm_setzero_si128()))
 
 #define _mm256_rol_epi64(a, b) \
-  (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))
+  ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))
 
 #define _mm256_mask_rol_epi64(w, u, a, b) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                      (__v4di)_mm256_rol_epi64((a), (b)), \
-                                      (__v4di)(__m256i)(w))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+                                       (__v4di)_mm256_rol_epi64((a), (b)), \
+                                       (__v4di)(__m256i)(w)))
 
 #define _mm256_maskz_rol_epi64(u, a, b) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                      (__v4di)_mm256_rol_epi64((a), (b)), \
-                                      (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+                                       (__v4di)_mm256_rol_epi64((a), (b)), \
+                                       (__v4di)_mm256_setzero_si256()))
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_rolv_epi32 (__m128i __A, __m128i __B)
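
Note that the masked rotate forms are macros layered on the unmasked macro
(a select over its result), so one cast expression expands inside another;
parenthesizing every level keeps each expansion a primary expression no
matter how deeply the macros nest. Illustrative use (the mask and rotate
count are made-up example values):

    #include <immintrin.h>

    __m128i demo(__m128i a) {
      /* Rotate each 32-bit lane left by 3; lanes 1 and 3 are zeroed
         because their bits are clear in the 0x5 mask. */
      return _mm_maskz_rol_epi32((__mmask8)0x5, a, 3);
    }
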
@@ -4438,56 +4438,56 @@ _mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
 }
 
 #define _mm_ror_epi32(a, b) \
-  (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))
+  ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))
 
 #define _mm_mask_ror_epi32(w, u, a, b) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                      (__v4si)_mm_ror_epi32((a), (b)), \
-                                      (__v4si)(__m128i)(w))
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+                                       (__v4si)_mm_ror_epi32((a), (b)), \
+                                       (__v4si)(__m128i)(w)))
 
 #define _mm_maskz_ror_epi32(u, a, b) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
-                                      (__v4si)_mm_ror_epi32((a), (b)), \
-                                      (__v4si)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+                                       (__v4si)_mm_ror_epi32((a), (b)), \
+                                       (__v4si)_mm_setzero_si128()))
 
 #define _mm256_ror_epi32(a, b) \
-  (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))
+  ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))
 
 #define _mm256_mask_ror_epi32(w, u, a, b) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                      (__v8si)_mm256_ror_epi32((a), (b)), \
-                                      (__v8si)(__m256i)(w))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+                                       (__v8si)_mm256_ror_epi32((a), (b)), \
+                                       (__v8si)(__m256i)(w)))
 
 #define _mm256_maskz_ror_epi32(u, a, b) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
-                                      (__v8si)_mm256_ror_epi32((a), (b)), \
-                                      (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+                                       (__v8si)_mm256_ror_epi32((a), (b)), \
+                                       (__v8si)_mm256_setzero_si256()))
 
 #define _mm_ror_epi64(a, b) \
-  (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))
+  ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))
 
 #define _mm_mask_ror_epi64(w, u, a, b) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                      (__v2di)_mm_ror_epi64((a), (b)), \
-                                      (__v2di)(__m128i)(w))
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+                                       (__v2di)_mm_ror_epi64((a), (b)), \
+                                       (__v2di)(__m128i)(w)))
 
 #define _mm_maskz_ror_epi64(u, a, b) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
-                                      (__v2di)_mm_ror_epi64((a), (b)), \
-                                      (__v2di)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+                                       (__v2di)_mm_ror_epi64((a), (b)), \
+                                       (__v2di)_mm_setzero_si128()))
 
 #define _mm256_ror_epi64(a, b) \
-  (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))
+  ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))
 
 #define _mm256_mask_ror_epi64(w, u, a, b) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                      (__v4di)_mm256_ror_epi64((a), (b)), \
-                                      (__v4di)(__m256i)(w))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+                                       (__v4di)_mm256_ror_epi64((a), (b)), \
+                                       (__v4di)(__m256i)(w)))
 
 #define _mm256_maskz_ror_epi64(u, a, b) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
-                                      (__v4di)_mm256_ror_epi64((a), (b)), \
-                                      (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+                                       (__v4di)_mm256_ror_epi64((a), (b)), \
+                                       (__v4di)_mm256_setzero_si256()))
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
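
The ror family mirrors rol: for 32-bit lanes, rotating right by n is the
same as rotating left by 32 - n. A one-line illustrative sketch:

    #include <immintrin.h>

    __m128i rot8(__m128i a) {
      /* Rotate each 32-bit lane right by 8; equivalent to _mm_rol_epi32(a, 24). */
      return _mm_ror_epi32(a, 8);
    }
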
@@ -5356,76 +5356,76 @@ _mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
 }
 
 #define _mm_fixupimm_pd(A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2di)(__m128i)(C), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+                                              (__v2df)(__m128d)(B), \
+                                              (__v2di)(__m128i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2di)(__m128i)(C), \
-                                              (int)(imm), (__mmask8)(U))
+  ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2di)(__m128i)(C), \
+                                               (int)(imm), (__mmask8)(U)))
 
 #define _mm256_fixupimm_pd(A, B, C, imm) \
-  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                             (__v4df)(__m256d)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)-1)
+  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (__v4di)(__m256i)(C), (int)(imm), \
+                                              (__mmask8)-1))
 
 #define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
-  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
-                                             (__v4df)(__m256d)(B), \
-                                             (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+                                              (__v4df)(__m256d)(B), \
+                                              (__v4di)(__m256i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
-  (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
-                                              (__v4df)(__m256d)(B), \
-                                              (__v4di)(__m256i)(C), \
-                                              (int)(imm), (__mmask8)(U))
+  ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
+                                               (__v4df)(__m256d)(B), \
+                                               (__v4di)(__m256i)(C), \
+                                               (int)(imm), (__mmask8)(U)))
 
 #define _mm_fixupimm_ps(A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4si)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+                                             (__v4sf)(__m128)(B), \
+                                             (__v4si)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
-                                            (__v4sf)(__m128)(B), \
-                                            (__v4si)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
+  ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4si)(__m128i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_fixupimm_ps(A, B, C, imm) \
-  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                            (__v8sf)(__m256)(B), \
-                                            (__v8si)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+                                             (__v8sf)(__m256)(B), \
+                                             (__v8si)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
-  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
-                                            (__v8sf)(__m256)(B), \
-                                            (__v8si)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
-  (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                              (__v8sf)(__m256)(B), \
                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
+  ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+                                              (__v8sf)(__m256)(B), \
+                                              (__v8si)(__m256i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
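
One reading note for hunks like the _mm_maskz_fixupimm_ps one just above (the
same thing recurs in the ternarylogic hunk further down): a few macros appear
to be deleted and re-added a few lines away. Nothing actually moved. Adding
the outer parenthesis widens the first line of each macro by one column and
the continuation lines are re-indented to match; where a neighboring macro's
builtin name is one character longer, its old continuation indentation
coincides with the new one, so the diff algorithm anchors on those unchanged
columns and splits one mechanical change across interleaved +/- runs.
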
@@ -6033,44 +6033,44 @@ _mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
 }
 
 #define _mm_mask_permute_pd(W, U, X, C) \
-  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                       (__v2df)_mm_permute_pd((X), (C)), \
-                                       (__v2df)(__m128d)(W))
+  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                        (__v2df)_mm_permute_pd((X), (C)), \
+                                        (__v2df)(__m128d)(W)))
 
 #define _mm_maskz_permute_pd(U, X, C) \
-  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                       (__v2df)_mm_permute_pd((X), (C)), \
-                                       (__v2df)_mm_setzero_pd())
+  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                        (__v2df)_mm_permute_pd((X), (C)), \
+                                        (__v2df)_mm_setzero_pd()))
 
 #define _mm256_mask_permute_pd(W, U, X, C) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_permute_pd((X), (C)), \
-                                       (__v4df)(__m256d)(W))
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                        (__v4df)_mm256_permute_pd((X), (C)), \
+                                        (__v4df)(__m256d)(W)))
 
 #define _mm256_maskz_permute_pd(U, X, C) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_permute_pd((X), (C)), \
-                                       (__v4df)_mm256_setzero_pd())
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                        (__v4df)_mm256_permute_pd((X), (C)), \
+                                        (__v4df)_mm256_setzero_pd()))
 
 #define _mm_mask_permute_ps(W, U, X, C) \
-  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                      (__v4sf)_mm_permute_ps((X), (C)), \
-                                      (__v4sf)(__m128)(W))
+  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                       (__v4sf)_mm_permute_ps((X), (C)), \
+                                       (__v4sf)(__m128)(W)))
 
 #define _mm_maskz_permute_ps(U, X, C) \
-  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                      (__v4sf)_mm_permute_ps((X), (C)), \
-                                      (__v4sf)_mm_setzero_ps())
+  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                       (__v4sf)_mm_permute_ps((X), (C)), \
+                                       (__v4sf)_mm_setzero_ps()))
 
 #define _mm256_mask_permute_ps(W, U, X, C) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_permute_ps((X), (C)), \
-                                      (__v8sf)(__m256)(W))
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_permute_ps((X), (C)), \
+                                       (__v8sf)(__m256)(W)))
 
 #define _mm256_maskz_permute_ps(U, X, C) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_permute_ps((X), (C)), \
-                                      (__v8sf)_mm256_setzero_ps())
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_permute_ps((X), (C)), \
+                                       (__v8sf)_mm256_setzero_ps()))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
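
The masked permute forms follow the same select-over-result pattern as the
rotates above. A hedged sketch of the masked blend behavior (the control and
mask values are illustrative):

    #include <immintrin.h>

    __m128d demo(__m128d w, __m128d x) {
      /* Control 0x1 swaps the two lanes of x; only mask bit 0 is set,
         so lane 1 of the result keeps w's value instead. */
      return _mm_mask_permute_pd(w, (__mmask8)0x1, x, 0x1);
    }
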
@@ -6526,175 +6526,175 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
 }
 
 #define _mm_ternarylogic_epi32(A, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                            (__v4si)(__m128i)(B), \
-                                            (__v4si)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
+                                             (__v4si)(__m128i)(B), \
+                                             (__v4si)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
-                                            (__v4si)(__m128i)(B), \
-                                            (__v4si)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
+  ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
                                              (__v4si)(__m128i)(B), \
                                              (__v4si)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+  ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
+                                              (__v4si)(__m128i)(B), \
+                                              (__v4si)(__m128i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_ternarylogic_epi32(A, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                            (__v8si)(__m256i)(B), \
-                                            (__v8si)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
+                                             (__v8si)(__m256i)(B), \
+                                             (__v8si)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
-                                            (__v8si)(__m256i)(B), \
-                                            (__v8si)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
+  ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
                                              (__v8si)(__m256i)(B), \
                                              (__v8si)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+  ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
+                                              (__v8si)(__m256i)(B), \
+                                              (__v8si)(__m256i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm_ternarylogic_epi64(A, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                            (__v2di)(__m128i)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
+                                             (__v2di)(__m128i)(B), \
+                                             (__v2di)(__m128i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
-                                            (__v2di)(__m128i)(B), \
-                                            (__v2di)(__m128i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
+  ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
                                              (__v2di)(__m128i)(B), \
                                              (__v2di)(__m128i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+  ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
+                                              (__v2di)(__m128i)(B), \
+                                              (__v2di)(__m128i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 #define _mm256_ternarylogic_epi64(A, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                            (__v4di)(__m256i)(B), \
-                                            (__v4di)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)-1)
+  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
+                                             (__v4di)(__m256i)(B), \
+                                             (__v4di)(__m256i)(C), (int)(imm), \
+                                             (__mmask8)-1))
 
 #define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
-                                            (__v4di)(__m256i)(B), \
-                                            (__v4di)(__m256i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
+  ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
                                              (__v4di)(__m256i)(B), \
                                              (__v4di)(__m256i)(C), (int)(imm), \
-                                             (__mmask8)(U))
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+  ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
+                                              (__v4di)(__m256i)(B), \
+                                              (__v4di)(__m256i)(C), (int)(imm), \
+                                              (__mmask8)(U)))
 
 
 
 #define _mm256_shuffle_f32x4(A, B, imm) \
-  (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm))
+  ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
+                                         (__v8sf)(__m256)(B), (int)(imm)))
 
 #define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v8sf)(__m256)(W))
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
+                                       (__v8sf)(__m256)(W)))
 
 #define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v8sf)_mm256_setzero_ps())
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
+                                       (__v8sf)_mm256_setzero_ps()))
 
 #define _mm256_shuffle_f64x2(A, B, imm) \
-  (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm))
+  ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
+                                          (__v4df)(__m256d)(B), (int)(imm)))
 
 #define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                      (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                      (__v4df)(__m256d)(W))
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
+                                       (__v4df)(__m256d)(W)))
 
 #define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                      (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                      (__v4df)_mm256_setzero_pd())
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
+                                       (__v4df)_mm256_setzero_pd()))
 
 #define _mm256_shuffle_i32x4(A, B, imm) \
-  (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
-                                         (__v8si)(__m256i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
+                                          (__v8si)(__m256i)(B), (int)(imm)))
 
 #define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v8si)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
+                                       (__v8si)(__m256i)(W)))
 
 #define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
+                                       (__v8si)_mm256_setzero_si256()))
 
 #define _mm256_shuffle_i64x2(A, B, imm) \
-  (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
-                                         (__v4di)(__m256i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
+                                          (__v4di)(__m256i)(B), (int)(imm)))
 
 #define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v4di)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
+                                       (__v4di)(__m256i)(W)))
 
 
 #define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
+                                       (__v4di)_mm256_setzero_si256()))
 
 #define _mm_mask_shuffle_pd(W, U, A, B, M) \
-  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                       (__v2df)(__m128d)(W))
+  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                        (__v2df)(__m128d)(W)))
 
 #define _mm_maskz_shuffle_pd(U, A, B, M) \
-  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
-                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
-                                       (__v2df)_mm_setzero_pd())
+  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                        (__v2df)_mm_setzero_pd()))
 
 #define _mm256_mask_shuffle_pd(W, U, A, B, M) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                       (__v4df)(__m256d)(W))
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                        (__v4df)(__m256d)(W)))
 
 #define _mm256_maskz_shuffle_pd(U, A, B, M) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
-                                       (__v4df)_mm256_setzero_pd())
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                        (__v4df)_mm256_setzero_pd()))
 
 #define _mm_mask_shuffle_ps(W, U, A, B, M) \
-  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                      (__v4sf)(__m128)(W))
+  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                       (__v4sf)(__m128)(W)))
 
 #define _mm_maskz_shuffle_ps(U, A, B, M) \
-  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
-                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
-                                      (__v4sf)_mm_setzero_ps())
+  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                       (__v4sf)_mm_setzero_ps()))
 
 #define _mm256_mask_shuffle_ps(W, U, A, B, M) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                      (__v8sf)(__m256)(W))
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                       (__v8sf)(__m256)(W)))
 
 #define _mm256_maskz_shuffle_ps(U, A, B, M) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
-                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
-                                      (__v8sf)_mm256_setzero_ps())
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                       (__v8sf)_mm256_setzero_ps()))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_rsqrt14_pd (__m128d __A)
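
For readers unfamiliar with ternarylogic: the immediate is an 8-entry truth
table, indexed per bit position by (a_bit << 2) | (b_bit << 1) | c_bit. A
sketch of a classic instance, bitwise majority via imm8 = 0xE8 (bits 3, 5,
6, 7 set: exactly the truth-table rows where at least two inputs are 1):

    #include <immintrin.h>

    __m128i majority(__m128i a, __m128i b, __m128i c) {
      return _mm_ternarylogic_epi32(a, b, c, 0xE8);
    }
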
@@ -7834,262 +7834,262 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
 }
 
 #define _mm256_extractf32x4_ps(A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                               (int)(imm), \
-                                               (__v4sf)_mm_undefined_ps(), \
-                                               (__mmask8)-1)
+  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                                (int)(imm), \
+                                                (__v4sf)_mm_undefined_ps(), \
+                                                (__mmask8)-1))
 
 #define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                               (int)(imm), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U))
+  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                                (int)(imm), \
+                                                (__v4sf)(__m128)(W), \
+                                                (__mmask8)(U)))
 
 #define _mm256_maskz_extractf32x4_ps(U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
-                                               (int)(imm), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U))
+  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+                                                (int)(imm), \
+                                                (__v4sf)_mm_setzero_ps(), \
+                                                (__mmask8)(U)))
 
 #define _mm256_extracti32x4_epi32(A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                (int)(imm), \
-                                                (__v4si)_mm_undefined_si128(), \
-                                                (__mmask8)-1)
+  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                 (int)(imm), \
+                                                 (__v4si)_mm_undefined_si128(), \
+                                                 (__mmask8)-1))
 
 #define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                (int)(imm), \
-                                                (__v4si)(__m128i)(W), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                 (int)(imm), \
+                                                 (__v4si)(__m128i)(W), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
-                                                (int)(imm), \
-                                                (__v4si)_mm_setzero_si128(), \
-                                                (__mmask8)(U))
+  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+                                                 (int)(imm), \
+                                                 (__v4si)_mm_setzero_si128(), \
+                                                 (__mmask8)(U)))
 
 #define _mm256_insertf32x4(A, B, imm) \
-  (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
-                                         (__v4sf)(__m128)(B), (int)(imm))
+  ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
+                                          (__v4sf)(__m128)(B), (int)(imm)))
 
 #define _mm256_mask_insertf32x4(W, U, A, B, imm) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                   (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)(__m256)(W))
+                                  (__v8sf)(__m256)(W)))
 
 #define _mm256_maskz_insertf32x4(U, A, B, imm) \
-  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                   (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
-                                  (__v8sf)_mm256_setzero_ps())
+                                  (__v8sf)_mm256_setzero_ps()))
 
 #define _mm256_inserti32x4(A, B, imm) \
-  (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
-                                          (__v4si)(__m128i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
+                                           (__v4si)(__m128i)(B), (int)(imm)))
 
 #define _mm256_mask_inserti32x4(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)(__m256i)(W))
+                                  (__v8si)(__m256i)(W)))
 
 #define _mm256_maskz_inserti32x4(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
-                                  (__v8si)_mm256_setzero_si256())
+                                  (__v8si)_mm256_setzero_si256()))
 
 #define _mm_getmant_pd(A, B, C) \
-  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1)
+  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1))
 
 #define _mm_mask_getmant_pd(W, U, A, B, C) \
-  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U))
+  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v2df)(__m128d)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm_maskz_getmant_pd(U, A, B, C) \
-  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U))
+  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(U)))
 
 #define _mm256_getmant_pd(A, B, C) \
-  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4df)_mm256_setzero_pd(), \
-                                            (__mmask8)-1)
+  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v4df)_mm256_setzero_pd(), \
+                                             (__mmask8)-1))
 
 #define _mm256_mask_getmant_pd(W, U, A, B, C) \
-  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4df)(__m256d)(W), \
-                                            (__mmask8)(U))
+  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v4df)(__m256d)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm256_maskz_getmant_pd(U, A, B, C) \
-  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v4df)_mm256_setzero_pd(), \
-                                            (__mmask8)(U))
+  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+                                             (int)(((C)<<2) | (B)), \
+                                             (__v4df)_mm256_setzero_pd(), \
+                                             (__mmask8)(U)))
 
 #define _mm_getmant_ps(A, B, C) \
-  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1)
+  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1))
 
 #define _mm_mask_getmant_ps(W, U, A, B, C) \
-  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v4sf)(__m128)(W), \
-                                           (__mmask8)(U))
+  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4sf)(__m128)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm_maskz_getmant_ps(U, A, B, C) \
-  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U))
+  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(U)))
 
 #define _mm256_getmant_ps(A, B, C) \
-  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)-1)
+  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8sf)_mm256_setzero_ps(), \
+                                            (__mmask8)-1))
 
 #define _mm256_mask_getmant_ps(W, U, A, B, C) \
-  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v8sf)(__m256)(W), \
-                                           (__mmask8)(U))
+  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8sf)(__m256)(W), \
+                                            (__mmask8)(U)))
 
 #define _mm256_maskz_getmant_ps(U, A, B, C) \
-  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v8sf)_mm256_setzero_ps(), \
-                                           (__mmask8)(U))
+  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+                                            (int)(((C)<<2) | (B)), \
+                                            (__v8sf)_mm256_setzero_ps(), \
+                                            (__mmask8)(U)))
 
 #define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v2di)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v2di)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v2di)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v2di)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4di)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4di)(__m256i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4di)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4di)(__m256i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v2di)(__m128i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+  ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+                                        (__mmask8)(mask), (int)(scale)))
 
-#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v4di)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
+  ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v2di)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
-#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
+  ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+                                        (__mmask8)(mask), (int)(scale)))
+
+#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
+  ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4di)(__m256i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4si)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4si)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4si)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+  ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4si)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v4si)(__m128i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+  ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+                                        (__mmask8)(mask), (int)(scale)))
 
-#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
+  ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v4si)(__m128i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
-#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
+  ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
                                         (void const *)(addr), \
                                         (__v8si)(__m256i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
+                                        (__mmask8)(mask), (int)(scale)))
+
+#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
+  ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+                                         (void const *)(addr), \
+                                         (__v8si)(__m256i)(index), \
+                                         (__mmask8)(mask), (int)(scale)))
 
 #define _mm256_permutex_pd(X, C) \
-  (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))
+  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))
 
 #define _mm256_mask_permutex_pd(W, U, X, C) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permutex_pd((X), (C)), \
-                                       (__v4df)(__m256d)(W))
+                                       (__v4df)(__m256d)(W)))
 
 #define _mm256_maskz_permutex_pd(U, X, C) \
-  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
-                                       (__v4df)_mm256_permutex_pd((X), (C)), \
-                                       (__v4df)_mm256_setzero_pd())
+  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                        (__v4df)_mm256_permutex_pd((X), (C)), \
+                                        (__v4df)_mm256_setzero_pd()))
 
 #define _mm256_permutex_epi64(X, C) \
-  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
+  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))
 
 #define _mm256_mask_permutex_epi64(W, U, X, C) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)(__m256i)(W))
+                                      (__v4di)(__m256i)(W)))
 
 #define _mm256_maskz_permutex_epi64(U, X, C) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
-                                      (__v4di)_mm256_setzero_si256())
+                                      (__v4di)_mm256_setzero_si256()))
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_permutexvar_pd (__m256i __X, __m256d __Y)
@@ -8175,60 +8175,60 @@ _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
 }
 
 #define _mm_alignr_epi32(A, B, imm) \
-  (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
-                                    (__v4si)(__m128i)(B), (int)(imm))
+  ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
+                                     (__v4si)(__m128i)(B), (int)(imm)))
 
 #define _mm_mask_alignr_epi32(W, U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                     (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)(__m128i)(W))
+                                    (__v4si)(__m128i)(W)))
 
 #define _mm_maskz_alignr_epi32(U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                     (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
-                                    (__v4si)_mm_setzero_si128())
+                                    (__v4si)_mm_setzero_si128()))
 
 #define _mm256_alignr_epi32(A, B, imm) \
-  (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
-                                    (__v8si)(__m256i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
+                                     (__v8si)(__m256i)(B), (int)(imm)))
 
 #define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                  (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)(__m256i)(W))
+                                 (__v8si)(__m256i)(W)))
 
 #define _mm256_maskz_alignr_epi32(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                  (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
-                                 (__v8si)_mm256_setzero_si256())
+                                 (__v8si)_mm256_setzero_si256()))
 
 #define _mm_alignr_epi64(A, B, imm) \
-  (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
-                                    (__v2di)(__m128i)(B), (int)(imm))
+  ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
+                                     (__v2di)(__m128i)(B), (int)(imm)))
 
 #define _mm_mask_alignr_epi64(W, U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                     (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)(__m128i)(W))
+                                    (__v2di)(__m128i)(W)))
 
 #define _mm_maskz_alignr_epi64(U, A, B, imm) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                     (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
-                                    (__v2di)_mm_setzero_si128())
+                                    (__v2di)_mm_setzero_si128()))
 
 #define _mm256_alignr_epi64(A, B, imm) \
-  (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
-                                    (__v4di)(__m256i)(B), (int)(imm))
+  ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
+                                     (__v4di)(__m256i)(B), (int)(imm)))
 
 #define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                  (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)(__m256i)(W))
+                                 (__v4di)(__m256i)(W)))
 
 #define _mm256_maskz_alignr_epi64(U, A, B, imm) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                  (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
-                                 (__v4di)_mm256_setzero_si256())
+                                 (__v4di)_mm256_setzero_si256()))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
@@ -8295,24 +8295,24 @@ _mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
 }
 
 #define _mm256_mask_shuffle_epi32(W, U, A, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                      (__v8si)(__m256i)(W))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
+                                       (__v8si)(__m256i)(W)))
 
 #define _mm256_maskz_shuffle_epi32(U, A, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
-                                      (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
+                                       (__v8si)_mm256_setzero_si256()))
 
 #define _mm_mask_shuffle_epi32(W, U, A, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                      (__v4si)(__m128i)(W))
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
+                                       (__v4si)(__m128i)(W)))
 
 #define _mm_maskz_shuffle_epi32(U, A, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
-                                      (__v4si)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
+                                       (__v4si)_mm_setzero_si128()))
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
@@ -8413,27 +8413,27 @@ _mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
 }
 
 #define _mm_mask_cvt_roundps_ph(W, U, A, I) \
-  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                         (__v8hi)(__m128i)(W), \
-                                         (__mmask8)(U))
+  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+                                          (__v8hi)(__m128i)(W), \
+                                          (__mmask8)(U)))
 
 #define _mm_maskz_cvt_roundps_ph(U, A, I) \
-  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
-                                         (__v8hi)_mm_setzero_si128(), \
-                                         (__mmask8)(U))
+  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+                                          (__v8hi)_mm_setzero_si128(), \
+                                          (__mmask8)(U)))
 
 #define _mm_mask_cvtps_ph  _mm_mask_cvt_roundps_ph
 #define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph
 
 #define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
-  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                            (__v8hi)(__m128i)(W), \
-                                            (__mmask8)(U))
+  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+                                             (__v8hi)(__m128i)(W), \
+                                             (__mmask8)(U)))
 
 #define _mm256_maskz_cvt_roundps_ph(U, A, I) \
-  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
-                                            (__v8hi)_mm_setzero_si128(), \
-                                            (__mmask8)(U))
+  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+                                             (__v8hi)_mm_setzero_si128(), \
+                                             (__mmask8)(U)))
 
 #define _mm256_mask_cvtps_ph  _mm256_mask_cvt_roundps_ph
 #define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
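The extra parentheses matter because postfix operators bind tighter than a
C cast: without them, subscripting a macro result applies [] to the raw
builtin return value before the cast. A minimal sketch of the repaired
behavior (illustrative only, not part of the patch; assumes Clang's
vector-subscript extension and -mavx512vl):

  #include <immintrin.h>

  /* With the outer parentheses, [] applies to the cast __m256d result.
   * Without them the expansion would parse as
   *   (__m256d)(__builtin_ia32_permdf256(...)[0])
   * i.e. a scalar double cast to a vector type, which is rejected. */
  double first_lane(__m256d v) {
    return _mm256_permutex_pd(v, 0x1B)[0]; /* reverse lanes, read lane 0 */
  }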

diff --git a/clang/lib/Headers/avx512vlvbmi2intrin.h b/clang/lib/Headers/avx512vlvbmi2intrin.h
index a40f926de75a..fac1f232415a 100644
--- a/clang/lib/Headers/avx512vlvbmi2intrin.h
+++ b/clang/lib/Headers/avx512vlvbmi2intrin.h
@@ -239,172 +239,172 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
 }
 
 #define _mm256_shldi_epi64(A, B, I) \
-  (__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
-                                     (__v4di)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
+                                      (__v4di)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shldi_epi64(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                    (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
-                                    (__v4di)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                     (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+                                     (__v4di)(__m256i)(S)))
 
 #define _mm256_maskz_shldi_epi64(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                    (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
-                                    (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                     (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+                                     (__v4di)_mm256_setzero_si256()))
 
 #define _mm_shldi_epi64(A, B, I) \
-  (__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
-                                     (__v2di)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
+                                      (__v2di)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shldi_epi64(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                      (__v2di)_mm_shldi_epi64((A), (B), (I)), \
-                                      (__v2di)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+                                       (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+                                       (__v2di)(__m128i)(S)))
 
 #define _mm_maskz_shldi_epi64(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                      (__v2di)_mm_shldi_epi64((A), (B), (I)), \
-                                      (__v2di)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+                                       (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+                                       (__v2di)_mm_setzero_si128()))
 
 #define _mm256_shldi_epi32(A, B, I) \
-  (__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
-                                     (__v8si)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
+                                      (__v8si)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shldi_epi32(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                    (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
-                                    (__v8si)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                     (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+                                     (__v8si)(__m256i)(S)))
 
 #define _mm256_maskz_shldi_epi32(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                    (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
-                                    (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                     (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+                                     (__v8si)_mm256_setzero_si256()))
 
 #define _mm_shldi_epi32(A, B, I) \
-  (__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
-                                     (__v4si)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
+                                      (__v4si)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shldi_epi32(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shldi_epi32((A), (B), (I)), \
-                                      (__v4si)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+                                       (__v4si)(__m128i)(S)))
 
 #define _mm_maskz_shldi_epi32(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shldi_epi32((A), (B), (I)), \
-                                      (__v4si)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+                                       (__v4si)_mm_setzero_si128()))
 
 #define _mm256_shldi_epi16(A, B, I) \
-  (__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
-                                     (__v16hi)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
+                                      (__v16hi)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shldi_epi16(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                   (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
-                                   (__v16hi)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                    (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+                                    (__v16hi)(__m256i)(S)))
 
 #define _mm256_maskz_shldi_epi16(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                   (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
-                                   (__v16hi)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                    (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+                                    (__v16hi)_mm256_setzero_si256()))
 
 #define _mm_shldi_epi16(A, B, I) \
-  (__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
-                                     (__v8hi)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
+                                      (__v8hi)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shldi_epi16(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
-                                      (__v8hi)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+                                       (__v8hi)(__m128i)(S)))
 
 #define _mm_maskz_shldi_epi16(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
-                                      (__v8hi)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+                                       (__v8hi)_mm_setzero_si128()))
 
 #define _mm256_shrdi_epi64(A, B, I) \
-  (__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
-                                     (__v4di)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
+                                      (__v4di)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shrdi_epi64(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                    (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
-                                    (__v4di)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                     (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+                                     (__v4di)(__m256i)(S)))
 
 #define _mm256_maskz_shrdi_epi64(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
-                                    (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
-                                    (__v4di)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+                                     (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+                                     (__v4di)_mm256_setzero_si256()))
 
 #define _mm_shrdi_epi64(A, B, I) \
-  (__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
-                                     (__v2di)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
+                                      (__v2di)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shrdi_epi64(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                      (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
-                                      (__v2di)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+                                       (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+                                       (__v2di)(__m128i)(S)))
 
 #define _mm_maskz_shrdi_epi64(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
-                                      (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
-                                      (__v2di)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+                                       (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+                                       (__v2di)_mm_setzero_si128()))
 
 #define _mm256_shrdi_epi32(A, B, I) \
-  (__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
-                                     (__v8si)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
+                                      (__v8si)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shrdi_epi32(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                    (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
-                                    (__v8si)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                     (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+                                     (__v8si)(__m256i)(S)))
 
 #define _mm256_maskz_shrdi_epi32(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
-                                    (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
-                                    (__v8si)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+                                     (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+                                     (__v8si)_mm256_setzero_si256()))
 
 #define _mm_shrdi_epi32(A, B, I) \
-  (__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
-                                     (__v4si)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
+                                      (__v4si)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shrdi_epi32(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
-                                      (__v4si)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+                                       (__v4si)(__m128i)(S)))
 
 #define _mm_maskz_shrdi_epi32(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
-                                      (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
-                                      (__v4si)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+                                       (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+                                       (__v4si)_mm_setzero_si128()))
 
 #define _mm256_shrdi_epi16(A, B, I) \
-  (__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
-                                     (__v16hi)(__m256i)(B), (int)(I))
+  ((__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
+                                      (__v16hi)(__m256i)(B), (int)(I)))
 
 #define _mm256_mask_shrdi_epi16(S, U, A, B, I) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                   (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
-                                   (__v16hi)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                    (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+                                    (__v16hi)(__m256i)(S)))
 
 #define _mm256_maskz_shrdi_epi16(U, A, B, I) \
-  (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
-                                   (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
-                                   (__v16hi)_mm256_setzero_si256())
+  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+                                    (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+                                    (__v16hi)_mm256_setzero_si256()))
 
 #define _mm_shrdi_epi16(A, B, I) \
-  (__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
-                                     (__v8hi)(__m128i)(B), (int)(I))
+  ((__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
+                                      (__v8hi)(__m128i)(B), (int)(I)))
 
 #define _mm_mask_shrdi_epi16(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
-                                      (__v8hi)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+                                       (__v8hi)(__m128i)(S)))
 
 #define _mm_maskz_shrdi_epi16(U, A, B, I) \
-  (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
-                                      (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
-                                      (__v8hi)_mm_setzero_si128())
+  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+                                       (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+                                       (__v8hi)_mm_setzero_si128()))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C)
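For reference, the shldi/shrdi family above concatenates each pair of
lanes and shifts across the boundary. One 64-bit lane of _mm_shldi_epi64
reduces to the following scalar sketch (illustrative only, assuming
0 < I < 64):

  #include <stdint.h>

  /* a:b is treated as one 128-bit value, shifted left by I; the upper
   * 64 bits are kept. The shrdi mirror keeps the lower 64 bits of a
   * right shift: (b >> I) | (a << (64 - I)). */
  static uint64_t shldi64_lane(uint64_t a, uint64_t b, unsigned I) {
    return (a << I) | (b >> (64 - I));
  }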

diff --git a/clang/lib/Headers/avx512vlvnniintrin.h b/clang/lib/Headers/avx512vlvnniintrin.h
index 71ac1b4370d4..0fb29af262f7 100644
--- a/clang/lib/Headers/avx512vlvnniintrin.h
+++ b/clang/lib/Headers/avx512vlvnniintrin.h
@@ -36,7 +36,7 @@
 ///    DST[MAX:256] := 0
 /// \endoperation
 #define _mm256_dpbusd_epi32(S, A, B) \
-  (__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+  ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
 /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
 /// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -56,7 +56,7 @@
 ///    DST[MAX:256] := 0
 /// \endoperation
 #define _mm256_dpbusds_epi32(S, A, B) \
-  (__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+  ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
 /// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
 /// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -74,7 +74,7 @@
 ///    DST[MAX:256] := 0
 /// \endoperation
 #define _mm256_dpwssd_epi32(S, A, B) \
-  (__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+  ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
 /// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
 /// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -92,7 +92,7 @@
 ///    DST[MAX:256] := 0
 /// \endoperation
 #define _mm256_dpwssds_epi32(S, A, B) \
-  (__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+  ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
 
 /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
 /// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -112,7 +112,7 @@
 ///    DST[MAX:128] := 0
 /// \endoperation
 #define _mm_dpbusd_epi32(S, A, B) \
-  (__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+  ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
 /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
 /// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -132,7 +132,7 @@
 ///    DST[MAX:128] := 0
 /// \endoperation
 #define _mm_dpbusds_epi32(S, A, B) \
-  (__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+  ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
 /// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
 /// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -150,7 +150,7 @@
 ///    DST[MAX:128] := 0
 /// \endoperation
 #define _mm_dpwssd_epi32(S, A, B) \
-  (__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+  ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
 /// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
 /// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -168,7 +168,7 @@
 ///    DST[MAX:128] := 0
 /// \endoperation
 #define _mm_dpwssds_epi32(S, A, B) \
-  (__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+  ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
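The \operation blocks above pin the VNNI semantics down; as a concrete
reference, one 32-bit lane of _mm_dpbusd_epi32 reduces to this scalar
sketch (illustrative only, not code from the header):

  #include <stdint.h>

  /* Four unsigned bytes of A times the corresponding signed bytes of B,
   * each product a signed 16-bit value, summed without saturation into
   * the accumulator lane of S. The dpbusds variant saturates the final
   * sum instead. */
  static int32_t dpbusd_lane(int32_t s, const uint8_t a[4], const int8_t b[4]) {
    for (int j = 0; j < 4; ++j)
      s += (int16_t)(a[j] * b[j]);
    return s;
  }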

diff --git a/clang/lib/Headers/f16cintrin.h b/clang/lib/Headers/f16cintrin.h
index 109b604adae3..13905e6fb0ec 100644
--- a/clang/lib/Headers/f16cintrin.h
+++ b/clang/lib/Headers/f16cintrin.h
@@ -66,8 +66,8 @@ _cvtsh_ss(unsigned short __a)
 ///    1XX: Use MXCSR.RC for rounding
 /// \returns The converted 16-bit half-precision float value.
 #define _cvtss_sh(a, imm) \
-  (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
-                                                     (imm)))[0])
+  ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+                                                     (imm)))[0]))
 
 /// Converts a 128-bit vector containing 32-bit float values into a
 ///    128-bit vector containing 16-bit half-precision float values.
@@ -93,7 +93,7 @@ _cvtsh_ss(unsigned short __a)
 ///    values. The lower 64 bits are used to store the converted 16-bit
 ///    half-precision floating-point values.
 #define _mm_cvtps_ph(a, imm) \
-  (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))
+  ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
 
 /// Converts a 128-bit vector containing 16-bit half-precision float
 ///    values into a 128-bit vector containing 32-bit float values.
@@ -136,7 +136,7 @@ _mm_cvtph_ps(__m128i __a)
 /// \returns A 128-bit vector containing the converted 16-bit half-precision
 ///    float values.
 #define _mm256_cvtps_ph(a, imm) \
- (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm))
+ ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)))
 
 /// Converts a 128-bit vector containing 16-bit half-precision float
 ///    values into a 256-bit vector of [8 x float].
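With the added parentheses, _cvtss_sh composes cleanly inside larger
expressions. A minimal usage sketch (illustrative only; compile with
-mf16c, with imm 0 requesting round-to-nearest-even):

  #include <immintrin.h>

  /* Convert one float to a 16-bit half, rounding to nearest even. */
  unsigned short to_half(float f) {
    return _cvtss_sh(f, 0);
  }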

diff --git a/clang/lib/Headers/gfniintrin.h b/clang/lib/Headers/gfniintrin.h
index 11a321b7c919..a59238b0b131 100644
--- a/clang/lib/Headers/gfniintrin.h
+++ b/clang/lib/Headers/gfniintrin.h
@@ -28,14 +28,14 @@
 #define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))
 
 #define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
-  (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A),          \
-                                                  (__v16qi)(__m128i)(B),          \
-                                                  (char)(I))
+  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
+                                                   (__v16qi)(__m128i)(B), \
+                                                   (char)(I)))
 
 #define _mm_gf2p8affine_epi64_epi8(A, B, I) \
-  (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A),             \
-                                                  (__v16qi)(__m128i)(B),          \
-                                                  (char)(I))
+  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
+                                                   (__v16qi)(__m128i)(B), \
+                                                   (char)(I)))
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
@@ -46,14 +46,14 @@ _mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
 
 #ifdef __AVXINTRIN_H
 #define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
-  (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A),          \
-                                                  (__v32qi)(__m256i)(B),          \
-                                                  (char)(I))
+  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
+                                                   (__v32qi)(__m256i)(B), \
+                                                   (char)(I)))
 
 #define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
-  (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A),             \
-                                                  (__v32qi)(__m256i)(B),          \
-                                                  (char)(I))
+  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
+                                                   (__v32qi)(__m256i)(B), \
+                                                   (char)(I)))
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
 _mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
@@ -65,32 +65,32 @@ _mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
 
 #ifdef __AVX512BWINTRIN_H
 #define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
-  (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A),          \
-                                                  (__v64qi)(__m512i)(B),          \
-                                                  (char)(I))
+  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
+                                                   (__v64qi)(__m512i)(B), \
+                                                   (char)(I)))
 
 #define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
-   (__m512i)__builtin_ia32_selectb_512((__mmask64)(U),                            \
-        (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I),                       \
-        (__v64qi)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
+         (__v64qi)(__m512i)(S)))
 
 #define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
-  (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(),    \
-        U, A, B, I)
+  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+         U, A, B, I)
 
 #define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
-  (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A),             \
-                                                  (__v64qi)(__m512i)(B),          \
-                                                  (char)(I))
+  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
+                                                   (__v64qi)(__m512i)(B), \
+                                                   (char)(I)))
 
 #define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
-   (__m512i)__builtin_ia32_selectb_512((__mmask64)(U),                            \
-        (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I),                          \
-        (__v64qi)(__m512i)(S))
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
+         (__v64qi)(__m512i)(S)))
 
 #define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
-  (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(),       \
-        U, A, B, I)
+  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+         U, A, B, I)
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
 _mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
@@ -117,40 +117,39 @@ _mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
 
 #ifdef __AVX512VLBWINTRIN_H
 #define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U),                             \
-        (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I),                          \
-        (__v16qi)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+         (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
+         (__v16qi)(__m128i)(S)))
 
 #define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
-  (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(),       \
-        U, A, B, I)
+  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
+         U, A, B, I)
 
 #define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
-   (__m256i)__builtin_ia32_selectb_256((__mmask32)(U),                            \
-        (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I),                       \
-        (__v32qi)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
+         (__v32qi)(__m256i)(S)))
 
 #define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
-  (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
-        U, A, B, I)
+  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
+         U, A, B, I)
 
 #define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
-  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U),                             \
-        (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I),                             \
-        (__v16qi)(__m128i)(S))
+  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+         (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
+         (__v16qi)(__m128i)(S)))
 
 #define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
-  (__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(),          \
-        U, A, B, I)
+  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)
 
 #define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
-   (__m256i)__builtin_ia32_selectb_256((__mmask32)(U),                            \
-        (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I),                          \
-        (__v32qi)(__m256i)(S))
+  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+         (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
+         (__v32qi)(__m256i)(S)))
 
 #define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
-  (__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(),    \
-        U, A, B, I)
+  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
+         U, A, B, I)
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
 _mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
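After the cleanup, the gfni macro results can feed other intrinsics
directly. A usage sketch (illustrative only; compile with -mgfni; the
matrix operand m and immediate 0 here are placeholders, not meaningful
constants):

  #include <immintrin.h>

  /* Apply the GF(2^8) affine transform, then XOR a key vector; the
   * macro's parenthesized __m128i result composes without surprises. */
  __m128i affine_then_xor(__m128i x, __m128i m, __m128i k) {
    return _mm_xor_si128(_mm_gf2p8affine_epi64_epi8(x, m, 0), k);
  }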

diff --git a/clang/lib/Headers/vpclmulqdqintrin.h b/clang/lib/Headers/vpclmulqdqintrin.h
index 44daadb07d57..485692ea2b5b 100644
--- a/clang/lib/Headers/vpclmulqdqintrin.h
+++ b/clang/lib/Headers/vpclmulqdqintrin.h
@@ -15,15 +15,15 @@
 #define __VPCLMULQDQINTRIN_H
 
 #define _mm256_clmulepi64_epi128(A, B, I) \
-  (__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A),  \
-                                       (__v4di)(__m256i)(B),  \
-                                       (char)(I))
+  ((__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A),  \
+                                        (__v4di)(__m256i)(B),  \
+                                        (char)(I)))
 
 #ifdef __AVX512FINTRIN_H
 #define _mm512_clmulepi64_epi128(A, B, I) \
-  (__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A),  \
-                                       (__v8di)(__m512i)(B),  \
-                                       (char)(I))
+  ((__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A),  \
+                                        (__v8di)(__m512i)(B),  \
+                                        (char)(I)))
 #endif // __AVX512FINTRIN_H
 
 #endif /* __VPCLMULQDQINTRIN_H */

diff --git a/clang/lib/Headers/xopintrin.h b/clang/lib/Headers/xopintrin.h
index 5cedde41b625..976cdf4902a4 100644
--- a/clang/lib/Headers/xopintrin.h
+++ b/clang/lib/Headers/xopintrin.h
@@ -225,16 +225,16 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
 }
 
 #define _mm_roti_epi8(A, N) \
-  (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))
+  ((__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)))
 
 #define _mm_roti_epi16(A, N) \
-  (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))
+  ((__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)))
 
 #define _mm_roti_epi32(A, N) \
-  (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))
+  ((__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)))
 
 #define _mm_roti_epi64(A, N) \
-  (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))
+  ((__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)))
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_shl_epi8(__m128i __A, __m128i __B)
@@ -285,36 +285,36 @@ _mm_sha_epi64(__m128i __A, __m128i __B)
 }
 
 #define _mm_com_epu8(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
-                                  (__v16qi)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+                                   (__v16qi)(__m128i)(B), (N)))
 
 #define _mm_com_epu16(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
-                                  (__v8hi)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+                                   (__v8hi)(__m128i)(B), (N)))
 
 #define _mm_com_epu32(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
-                                  (__v4si)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+                                   (__v4si)(__m128i)(B), (N)))
 
 #define _mm_com_epu64(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
-                                  (__v2di)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+                                   (__v2di)(__m128i)(B), (N)))
 
 #define _mm_com_epi8(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
-                                 (__v16qi)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+                                  (__v16qi)(__m128i)(B), (N)))
 
 #define _mm_com_epi16(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
-                                 (__v8hi)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+                                  (__v8hi)(__m128i)(B), (N)))
 
 #define _mm_com_epi32(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
-                                 (__v4si)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+                                  (__v4si)(__m128i)(B), (N)))
 
 #define _mm_com_epi64(A, B, N) \
-  (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
-                                 (__v2di)(__m128i)(B), (N))
+  ((__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+                                  (__v2di)(__m128i)(B), (N)))
 
 #define _MM_PCOMCTRL_LT    0
 #define _MM_PCOMCTRL_LE    1
@@ -710,23 +710,23 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B)
 }
 
 #define _mm_permute2_pd(X, Y, C, I) \
-  (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
-                                     (__v2df)(__m128d)(Y), \
-                                     (__v2di)(__m128i)(C), (I))
+  ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+                                      (__v2df)(__m128d)(Y), \
+                                      (__v2di)(__m128i)(C), (I)))
 
 #define _mm256_permute2_pd(X, Y, C, I) \
-  (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
-                                        (__v4df)(__m256d)(Y), \
-                                        (__v4di)(__m256i)(C), (I))
+  ((__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+                                         (__v4df)(__m256d)(Y), \
+                                         (__v4di)(__m256i)(C), (I)))
 
 #define _mm_permute2_ps(X, Y, C, I) \
-  (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
-                                    (__v4si)(__m128i)(C), (I))
+  ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+                                     (__v4si)(__m128i)(C), (I)))
 
 #define _mm256_permute2_ps(X, Y, C, I) \
-  (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
-                                       (__v8sf)(__m256)(Y), \
-                                       (__v8si)(__m256i)(C), (I))
+  ((__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+                                        (__v8sf)(__m256)(Y), \
+                                        (__v8si)(__m256i)(C), (I)))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_frcz_ss(__m128 __A)
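
For anyone skimming the diff above, the extra parentheses matter because of
operator precedence: a C-style cast binds less tightly than postfix operators,
so a subscript (or any other postfix operator) applied to a macro's result used
to attach to the raw builtin call rather than to the cast. Below is a minimal
sketch of the hazard; it is not taken from PR51324, and the caller, its name,
and the use of Clang's vector-subscript extension are illustrative only (build
with -mxop for the XOP intrinsic used here):

    #include <x86intrin.h>

    /* Hypothetical caller. Before this commit, the return expression
     * below expanded to
     *   (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(a), (3))[0]
     * which parses as (__m128i)(__builtin_ia32_vprotbi(...)[0]): the
     * subscript grabs a single char lane from the builtin's __v16qi
     * result, and casting that char to __m128i is ill-formed. With the
     * new outer parentheses, the subscript applies to the finished
     * __m128i value and yields a long long lane, as intended. */
    long long first_lane(__m128i a) {
      return _mm_roti_epi8(a, 3)[0];
    }

Note also that the *_maskz_* GFNI wrappers are handled differently in this
patch: rather than gaining a second layer of parentheses, they drop their
now-redundant outer (__m128i)/(__m256i)/(__m512i) casts, since the _mm*_mask_*
macros they forward to already expand to fully parenthesized expressions of
the right type.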


        

