r336487 - [X86] Fix various type mismatches in intrinsic headers and intrinsic tests that cause extra bitcasts to be emitted in the IR.

Craig Topper via cfe-commits <cfe-commits at lists.llvm.org>
Sat Jul 7 10:03:33 PDT 2018


Author: ctopper
Date: Sat Jul  7 10:03:32 2018
New Revision: 336487

URL: http://llvm.org/viewvc/llvm-project?rev=336487&view=rev
Log:
[X86] Fix various type mismatches in intrinsic headers and intrinsic tests that cause extra bitcasts to be emitted in the IR.

Found via imprecise grepping of the -O0 IR. There could still be more bugs out there.
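
As a rough illustration of the pattern being fixed: a passthrough or zero
vector built with a helper of the wrong element type has to be bitcast before
it can be consumed, while the matching helper needs no conversion. A minimal
sketch, assuming an x86-64 clang and hypothetical function names (this is not
part of the patch):

  #include <immintrin.h>

  // Pre-fix pattern: _mm_setzero_pd() produces <2 x double>, so the
  // (__v4sf) cast shows up as an extra "bitcast <2 x double> ... to
  // <4 x float>" in the -O0 IR.
  __m128 zero_mismatched(void) {
    return (__m128)(__v4sf)_mm_setzero_pd();
  }

  // Post-fix pattern: _mm_setzero_ps() is already a <4 x float> value,
  // so the cast is a no-op and no bitcast is emitted.
  __m128 zero_matched(void) {
    return (__m128)(__v4sf)_mm_setzero_ps();
  }

Compiling with clang -O0 -S -emit-llvm should show the extra bitcast only in
the first function; the header changes below remove this pattern from the
intrinsics themselves.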

Modified:
    cfe/trunk/lib/Headers/avx512fintrin.h
    cfe/trunk/lib/Headers/avx512vlintrin.h
    cfe/trunk/lib/Headers/avxintrin.h
    cfe/trunk/test/CodeGen/avx-builtins.c
    cfe/trunk/test/CodeGen/avx-shuffle-builtins.c
    cfe/trunk/test/CodeGen/avx512dq-builtins.c
    cfe/trunk/test/CodeGen/avx512f-builtins.c

Modified: cfe/trunk/lib/Headers/avx512fintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512fintrin.h?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avx512fintrin.h (original)
+++ cfe/trunk/lib/Headers/avx512fintrin.h Sat Jul  7 10:03:32 2018
@@ -3374,7 +3374,7 @@ _mm512_maskz_permutex2var_epi64(__mmask8
 
 #define _mm512_extractf64x4_pd(A, I) \
   (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                            (__v4df)_mm256_undefined_si256(), \
+                                            (__v4df)_mm256_undefined_pd(), \
                                             (__mmask8)-1)
 
 #define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
@@ -5544,7 +5544,7 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m12
 {
  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
           (__v4sf) __B,
-          (__v4sf) _mm_setzero_pd (),
+          (__v4sf) _mm_setzero_ps (),
           (__mmask8) __U,
           _MM_FROUND_CUR_DIRECTION);
 }
@@ -5634,7 +5634,7 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m12
   (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
                                               (__v4sf)(__m128)(B), \
                                               (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_pd(), \
+                                              (__v4sf)_mm_setzero_ps(), \
                                               (__mmask8)(U), \
                                               _MM_FROUND_CUR_DIRECTION)
 
@@ -6721,24 +6721,24 @@ _mm512_maskz_srai_epi64(__mmask8 __U, __
                                        (__v8df)_mm512_setzero_pd())
 
 #define _mm512_shuffle_ps(A, B, M) \
-  (__m512d)__builtin_shufflevector((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), \
-                                   0  + (((M) >> 0) & 0x3), \
-                                   0  + (((M) >> 2) & 0x3), \
-                                   16 + (((M) >> 4) & 0x3), \
-                                   16 + (((M) >> 6) & 0x3), \
-                                   4  + (((M) >> 0) & 0x3), \
-                                   4  + (((M) >> 2) & 0x3), \
-                                   20 + (((M) >> 4) & 0x3), \
-                                   20 + (((M) >> 6) & 0x3), \
-                                   8  + (((M) >> 0) & 0x3), \
-                                   8  + (((M) >> 2) & 0x3), \
-                                   24 + (((M) >> 4) & 0x3), \
-                                   24 + (((M) >> 6) & 0x3), \
-                                   12 + (((M) >> 0) & 0x3), \
-                                   12 + (((M) >> 2) & 0x3), \
-                                   28 + (((M) >> 4) & 0x3), \
-                                   28 + (((M) >> 6) & 0x3))
+  (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+                                  (__v16sf)(__m512)(B), \
+                                  0  + (((M) >> 0) & 0x3), \
+                                  0  + (((M) >> 2) & 0x3), \
+                                  16 + (((M) >> 4) & 0x3), \
+                                  16 + (((M) >> 6) & 0x3), \
+                                  4  + (((M) >> 0) & 0x3), \
+                                  4  + (((M) >> 2) & 0x3), \
+                                  20 + (((M) >> 4) & 0x3), \
+                                  20 + (((M) >> 6) & 0x3), \
+                                  8  + (((M) >> 0) & 0x3), \
+                                  8  + (((M) >> 2) & 0x3), \
+                                  24 + (((M) >> 4) & 0x3), \
+                                  24 + (((M) >> 6) & 0x3), \
+                                  12 + (((M) >> 0) & 0x3), \
+                                  12 + (((M) >> 2) & 0x3), \
+                                  28 + (((M) >> 4) & 0x3), \
+                                  28 + (((M) >> 6) & 0x3))
 
 #define _mm512_mask_shuffle_ps(W, U, A, B, M) \
   (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
@@ -7651,7 +7651,7 @@ _mm512_maskz_getexp_ps (__mmask16 __U, _
                                        (__mmask8)(mask), (int)(scale))
 
 #define _mm512_i64gather_epi32(index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_ps(), \
+  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
                                         (int const *)(addr), \
                                         (__v8di)(__m512i)(index), \
                                         (__mmask8)-1, (int)(scale))
@@ -7675,7 +7675,7 @@ _mm512_maskz_getexp_ps (__mmask16 __U, _
                                        (__mmask8)(mask), (int)(scale))
 
 #define _mm512_i64gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_pd(), \
+  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
                                        (long long const *)(addr), \
                                        (__v8di)(__m512i)(index), (__mmask8)-1, \
                                        (int)(scale))
@@ -7825,16 +7825,16 @@ _mm_mask_fmadd_ss (__m128 __W, __mmask8
 }
 
 #define _mm_fmadd_round_ss(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddss3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
+  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), \
+                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
+                                        (int)(R))
 
 #define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddss3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
+  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+                                        (__v4sf)(__m128)(A), \
+                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
+                                        (int)(R))
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -8780,7 +8780,7 @@ static __inline__ __m512i __DEFAULT_FN_A
 _mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
 {
   return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) _mm512_setzero_pd (),
+                (__v8di) _mm512_setzero_si512 (),
                 (__mmask8) __U);
 }
 
@@ -8812,7 +8812,7 @@ static __inline__ __m512i __DEFAULT_FN_A
 _mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
 {
   return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) _mm512_setzero_pd(),
+              (__v8di) _mm512_setzero_si512(),
               (__mmask8) __U);
 }
 
@@ -8844,7 +8844,7 @@ static __inline__ __m512i __DEFAULT_FN_A
 _mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
 {
   return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) _mm512_setzero_ps(),
+              (__v16si) _mm512_setzero_si512(),
               (__mmask16) __U);
 }
 
@@ -8876,7 +8876,7 @@ static __inline__ __m512i __DEFAULT_FN_A
 _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
 {
   return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) _mm512_setzero_ps(),
+                (__v16si) _mm512_setzero_si512(),
                 (__mmask16) __U);
 }
 
@@ -8917,16 +8917,16 @@ _mm512_maskz_cvtps_pd (__mmask8 __U, __m
                                               (__v8df)_mm512_setzero_pd());
 }
 
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_cvtpslo_pd (__m512 __A)
 {
-  return (__m512) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
+  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
 }
 
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS
 _mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
 {
-  return (__m512) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
+  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
 }
 
 static __inline__ __m512d __DEFAULT_FN_ATTRS

Modified: cfe/trunk/lib/Headers/avx512vlintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512vlintrin.h?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avx512vlintrin.h (original)
+++ cfe/trunk/lib/Headers/avx512vlintrin.h Sat Jul  7 10:03:32 2018
@@ -3582,7 +3582,7 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __
   _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
     return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                                (__v4sf)_mm_sqrt_ps(__A),
-                                               (__v4sf)_mm_setzero_pd());
+                                               (__v4sf)_mm_setzero_ps());
   }
 
   static __inline__ __m256 __DEFAULT_FN_ATTRS
@@ -6413,7 +6413,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __
 #define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
   (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
-                                      (__v4df)(__m256)(W))
+                                      (__v4df)(__m256d)(W))
 
 #define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
   (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
@@ -6427,7 +6427,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __
 #define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
   (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v8si)(__m256)(W))
+                                      (__v8si)(__m256i)(W))
 
 #define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
   (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
@@ -6441,7 +6441,7 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __
 #define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
   (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v4di)(__m256)(W))
+                                      (__v4di)(__m256i)(W))
 
 
 #define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
@@ -7872,7 +7872,7 @@ _mm256_mask_cvtepi64_storeu_epi16 (void
                                        (__v4df)_mm256_setzero_pd())
 
 #define _mm256_permutex_epi64(X, C) \
-  (__m256d)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
+  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
 
 #define _mm256_mask_permutex_epi64(W, U, X, C) \
   (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \

Modified: cfe/trunk/lib/Headers/avxintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avxintrin.h?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avxintrin.h (original)
+++ cfe/trunk/lib/Headers/avxintrin.h Sat Jul  7 10:03:32 2018
@@ -4966,7 +4966,7 @@ _mm256_set_m128 (__m128 __hi, __m128 __l
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_set_m128d (__m128d __hi, __m128d __lo)
 {
-  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+  return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
 }
 
 /// Constructs a 256-bit integer vector by concatenating two 128-bit
@@ -4986,7 +4986,7 @@ _mm256_set_m128d (__m128d __hi, __m128d
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_set_m128i (__m128i __hi, __m128i __lo)
 {
-  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+  return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
 }
 
 /// Constructs a 256-bit floating-point vector of [8 x float] by
@@ -5032,7 +5032,7 @@ _mm256_setr_m128 (__m128 __lo, __m128 __
 static __inline __m256d __DEFAULT_FN_ATTRS
 _mm256_setr_m128d (__m128d __lo, __m128d __hi)
 {
-  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+  return (__m256d)_mm256_set_m128d(__hi, __lo);
 }
 
 /// Constructs a 256-bit integer vector by concatenating two 128-bit
@@ -5053,7 +5053,7 @@ _mm256_setr_m128d (__m128d __lo, __m128d
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_setr_m128i (__m128i __lo, __m128i __hi)
 {
-  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+  return (__m256i)_mm256_set_m128i(__hi, __lo);
 }
 
 #undef __DEFAULT_FN_ATTRS

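For reference, the new _mm256_set_m128d builds its result with a single
shufflevector over <2 x double> instead of round-tripping both halves through
_mm256_set_m128 as <4 x float>, and _mm256_set_m128i does the same over
<2 x i64>; the CHECK lines in the tests below are updated to match. A small
usage sketch, assuming -mavx and a hypothetical function name:

  #include <immintrin.h>

  // Concatenate two 128-bit double vectors into one 256-bit vector.
  // With the fixed header the -O0 IR contains roughly
  //   shufflevector <2 x double> %lo, <2 x double> %hi,
  //                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // rather than bitcasts to <4 x float> around an 8-wide shuffle.
  __m256d concat_pd(__m128d hi, __m128d lo) {
    return _mm256_set_m128d(hi, lo);
  }
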
Modified: cfe/trunk/test/CodeGen/avx-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx-builtins.c?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx-builtins.c Sat Jul  7 10:03:32 2018
@@ -1570,13 +1570,13 @@ __m256 test_mm256_set_m128(__m128 A, __m
 
 __m256d test_mm256_set_m128d(__m128d A, __m128d B) {
   // CHECK-LABEL: test_mm256_set_m128d
-  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   return _mm256_set_m128d(A, B);
 }
 
 __m256i test_mm256_set_m128i(__m128i A, __m128i B) {
   // CHECK-LABEL: test_mm256_set_m128i
-  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   return _mm256_set_m128i(A, B);
 }
 
@@ -1796,13 +1796,13 @@ __m256 test_mm256_setr_m128(__m128 A, __
 
 __m256d test_mm256_setr_m128d(__m128d A, __m128d B) {
   // CHECK-LABEL: test_mm256_setr_m128d
-  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   return _mm256_setr_m128d(A, B);
 }
 
 __m256i test_mm256_setr_m128i(__m128i A, __m128i B) {
   // CHECK-LABEL: test_mm256_setr_m128i
-  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   return _mm256_setr_m128i(A, B);
 }
 
@@ -1990,7 +1990,7 @@ int test_mm256_testc_ps(__m256 A, __m256
   return _mm256_testc_ps(A, B);
 }
 
-int test_mm256_testc_si256(__m256 A, __m256 B) {
+int test_mm256_testc_si256(__m256i A, __m256i B) {
   // CHECK-LABEL: test_mm256_testc_si256
   // CHECK: call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
   return _mm256_testc_si256(A, B);
@@ -2020,7 +2020,7 @@ int test_mm256_testnzc_ps(__m256 A, __m2
   return _mm256_testnzc_ps(A, B);
 }
 
-int test_mm256_testnzc_si256(__m256 A, __m256 B) {
+int test_mm256_testnzc_si256(__m256i A, __m256i B) {
   // CHECK-LABEL: test_mm256_testnzc_si256
   // CHECK: call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
   return _mm256_testnzc_si256(A, B);
@@ -2050,7 +2050,7 @@ int test_mm256_testz_ps(__m256 A, __m256
   return _mm256_testz_ps(A, B);
 }
 
-int test_mm256_testz_si256(__m256 A, __m256 B) {
+int test_mm256_testz_si256(__m256i A, __m256i B) {
   // CHECK-LABEL: test_mm256_testz_si256
   // CHECK: call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}})
   return _mm256_testz_si256(A, B);

Modified: cfe/trunk/test/CodeGen/avx-shuffle-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx-shuffle-builtins.c?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx-shuffle-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx-shuffle-builtins.c Sat Jul  7 10:03:32 2018
@@ -171,13 +171,13 @@ __m256 test_mm256_set_m128(__m128 hi, __
 
 __m256d test_mm256_set_m128d(__m128d hi, __m128d lo) {
   // CHECK-LABEL: @test_mm256_set_m128d
-  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
   return _mm256_set_m128d(hi, lo);
 }
 
 __m256i test_mm256_set_m128i(__m128i hi, __m128i lo) {
   // CHECK-LABEL: @test_mm256_set_m128i
-  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>  
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>  
   return _mm256_set_m128i(hi, lo);
 }
 
@@ -189,12 +189,12 @@ __m256 test_mm256_setr_m128(__m128 hi, _
 
 __m256d test_mm256_setr_m128d(__m128d hi, __m128d lo) {
   // CHECK-LABEL: @test_mm256_setr_m128d
-  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
   return _mm256_setr_m128d(lo, hi);
 }
 
 __m256i test_mm256_setr_m128i(__m128i hi, __m128i lo) {
   // CHECK-LABEL: @test_mm256_setr_m128i
-  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3>
   return _mm256_setr_m128i(lo, hi);
 }

Modified: cfe/trunk/test/CodeGen/avx512dq-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512dq-builtins.c?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512dq-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512dq-builtins.c Sat Jul  7 10:03:32 2018
@@ -691,13 +691,13 @@ __m128d test_mm512_maskz_range_round_sd(
   return _mm_maskz_range_round_sd(__U, __A, __B, 4, 8); 
 }
 
-__m128d test_mm512_range_round_ss(__m128d __A, __m128d __B) {
+__m128 test_mm512_range_round_ss(__m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm512_range_round_ss
   // CHECK: @llvm.x86.avx512.mask.range.ss
   return _mm_range_round_ss(__A, __B, 4, 8); 
 }
 
-__m128d test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+__m128 test_mm512_mask_range_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm512_mask_range_round_ss
   // CHECK: @llvm.x86.avx512.mask.range.ss
   return _mm_mask_range_round_ss(__W, __U, __A, __B, 4, 8); 
@@ -727,13 +727,13 @@ __m128d test_mm_maskz_range_sd(__mmask8
   return _mm_maskz_range_sd(__U, __A, __B, 4); 
 }
 
-__m128d test_mm_range_ss(__m128d __A, __m128d __B) {
+__m128 test_mm_range_ss(__m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_range_ss
   // CHECK: @llvm.x86.avx512.mask.range.ss
   return _mm_range_ss(__A, __B, 4); 
 }
 
-__m128d test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+__m128 test_mm_mask_range_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_mask_range_ss
   // CHECK: @llvm.x86.avx512.mask.range.ss
   return _mm_mask_range_ss(__W, __U, __A, __B, 4); 
@@ -1256,13 +1256,13 @@ __mmask16 test_mm512_fpclass_ps_mask(__m
   return _mm512_fpclass_ps_mask(__A, 4); 
 }
 
-__mmask8 test_mm_fpclass_sd_mask(__m128 __A)  { 
+__mmask8 test_mm_fpclass_sd_mask(__m128d __A)  { 
   // CHECK-LABEL: @test_mm_fpclass_sd_mask
   // CHECK: @llvm.x86.avx512.mask.fpclass.sd
  return _mm_fpclass_sd_mask (__A, 2);
 }
 
-__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128 __A)  {
+__mmask8 test_mm_mask_fpclass_sd_mask(__mmask8 __U, __m128d __A)  {
  // CHECK-LABEL: @test_mm_mask_fpclass_sd_mask
  // CHECK: @llvm.x86.avx512.mask.fpclass.sd
  return _mm_mask_fpclass_sd_mask (__U,  __A, 2);

Modified: cfe/trunk/test/CodeGen/avx512f-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512f-builtins.c?rev=336487&r1=336486&r2=336487&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c Sat Jul  7 10:03:32 2018
@@ -2378,14 +2378,14 @@ __m128 test_mm512_extractf32x4_ps(__m512
   return _mm512_extractf32x4_ps(a, 1);
 }
 
-__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8  __U,__m512d __A){
+__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8  __U,__m512 __A){
   // CHECK-LABEL:@test_mm512_mask_extractf32x4_ps
   // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm512_mask_extractf32x4_ps( __W, __U, __A, 1);
 }
 
-__m128 test_mm512_maskz_extractf32x4_ps( __mmask8  __U,__m512d __A){
+__m128 test_mm512_maskz_extractf32x4_ps( __mmask8  __U,__m512 __A){
   // CHECK-LABEL:@test_mm512_maskz_extractf32x4_ps
   // CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
@@ -5341,19 +5341,19 @@ __m128 test_mm_roundscale_ss(__m128 __A,
   return _mm_roundscale_ss(__A, __B, 3); 
 }
 
-__m128 test_mm_mask_roundscale_ss(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_mask_roundscale_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_mask_roundscale_ss
   // CHECK: @llvm.x86.avx512.mask.rndscale.ss
     return _mm_mask_roundscale_ss(__W,__U,__A,__B,3);
 }
 
-__m128 test_mm_maskz_roundscale_round_ss( __mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_maskz_roundscale_round_ss( __mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_maskz_roundscale_round_ss
   // CHECK: @llvm.x86.avx512.mask.rndscale.ss
     return _mm_maskz_roundscale_round_ss(__U,__A,__B,3,_MM_FROUND_CUR_DIRECTION);
 }
 
-__m128 test_mm_maskz_roundscale_ss(__mmask8 __U, __m128d __A, __m128d __B){
+__m128 test_mm_maskz_roundscale_ss(__mmask8 __U, __m128 __A, __m128 __B){
   // CHECK-LABEL: @test_mm_maskz_roundscale_ss
   // CHECK: @llvm.x86.avx512.mask.rndscale.ss
     return _mm_maskz_roundscale_ss(__U,__A,__B,3);
@@ -6077,24 +6077,24 @@ __m512 test_mm512_maskz_broadcast_f32x4(
   return _mm512_maskz_broadcast_f32x4(__M, _mm_loadu_ps(__A)); 
 }
 
-__m512d test_mm512_broadcast_f64x4(float const* __A) {
+__m512d test_mm512_broadcast_f64x4(double const* __A) {
   // CHECK-LABEL: @test_mm512_broadcast_f64x4
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
-  return _mm512_broadcast_f64x4(_mm256_loadu_ps(__A)); 
+  return _mm512_broadcast_f64x4(_mm256_loadu_pd(__A)); 
 }
 
-__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, float const* __A) {
+__m512d test_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, double const* __A) {
   // CHECK-LABEL: @test_mm512_mask_broadcast_f64x4
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-  return _mm512_mask_broadcast_f64x4(__O, __M, _mm256_loadu_ps(__A)); 
+  return _mm512_mask_broadcast_f64x4(__O, __M, _mm256_loadu_pd(__A)); 
 }
 
-__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, float const* __A) {
+__m512d test_mm512_maskz_broadcast_f64x4(__mmask8 __M, double const* __A) {
   // CHECK-LABEL: @test_mm512_maskz_broadcast_f64x4
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
-  return _mm512_maskz_broadcast_f64x4(__M, _mm256_loadu_ps(__A)); 
+  return _mm512_maskz_broadcast_f64x4(__M, _mm256_loadu_pd(__A)); 
 }
 
 __m512i test_mm512_broadcast_i32x4(__m128i const* __A) {
@@ -8630,7 +8630,7 @@ __m512d test_mm512_castpd128_pd512(__m12
   return _mm512_castpd128_pd512(__A); 
 }
 
-__m512d test_mm512_set1_epi8(char d)
+__m512i test_mm512_set1_epi8(char d)
 {
   // CHECK-LABEL: @test_mm512_set1_epi8
   // CHECK: insertelement <64 x i8> {{.*}}, i32 0
@@ -8645,7 +8645,7 @@ __m512d test_mm512_set1_epi8(char d)
   return _mm512_set1_epi8(d);
 }
 
-__m512d test_mm512_set1_epi16(short d)
+__m512i test_mm512_set1_epi16(short d)
 {
   // CHECK-LABEL: @test_mm512_set1_epi16
   // CHECK: insertelement <32 x i16> {{.*}}, i32 0
@@ -8702,14 +8702,14 @@ __m512i test_mm512_setr4_epi32(int e0, i
   return _mm512_setr4_epi64(e0, e1, e2, e3);
 }
 
-__m512i test_mm512_setr4_pd(double e0, double e1, double e2, double e3)
+__m512d test_mm512_setr4_pd(double e0, double e1, double e2, double e3)
 {
   // CHECK-LABEL: @test_mm512_setr4_pd
   // CHECK: insertelement <8 x double> {{.*}}, i32 7
   return _mm512_setr4_pd(e0,e1,e2,e3);
 }
 
- __m512i test_mm512_setr4_ps(float e0, float e1, float e2, float e3)
+ __m512 test_mm512_setr4_ps(float e0, float e1, float e2, float e3)
 {
   // CHECK-LABEL: @test_mm512_setr4_ps
   // CHECK: insertelement <16 x float> {{.*}}, i32 15



