[cfe-commits] r110253 - in /cfe/trunk: lib/Headers/CMakeLists.txt lib/Headers/avxintrin.h test/Headers/x86-intrinsics-headers.c

Bruno Cardoso Lopes bruno.cardoso at gmail.com
Wed Aug 4 15:03:36 PDT 2010


Author: bruno
Date: Wed Aug  4 17:03:36 2010
New Revision: 110253

URL: http://llvm.org/viewvc/llvm-project?rev=110253&view=rev
Log:
Add AVX intrinsics header
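
As a quick illustration of what the new header provides (an editorial
sketch, not part of this commit; it assumes the compiler is invoked so
that __AVX__ is defined, e.g. with -mavx):

    #include <avxintrin.h>

    int main(void) {
      __m256d a = _mm256_set1_pd(1.0);                /* {1, 1, 1, 1} */
      __m256d b = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0); /* {1, 2, 3, 4} */
      __m256d c = _mm256_add_pd(a, b);                /* {2, 3, 4, 5} */
      double out[4];
      _mm256_storeu_pd(out, c);                       /* unaligned store */
      return (int)out[3];                             /* returns 5 */
    }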

Added:
    cfe/trunk/lib/Headers/avxintrin.h
Modified:
    cfe/trunk/lib/Headers/CMakeLists.txt
    cfe/trunk/test/Headers/x86-intrinsics-headers.c

Modified: cfe/trunk/lib/Headers/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/CMakeLists.txt?rev=110253&r1=110252&r2=110253&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/CMakeLists.txt (original)
+++ cfe/trunk/lib/Headers/CMakeLists.txt Wed Aug  4 17:03:36 2010
@@ -1,5 +1,6 @@
 set(files
   altivec.h
+  avxintrin.h
   emmintrin.h	
   float.h		
   iso646.h	

Added: cfe/trunk/lib/Headers/avxintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avxintrin.h?rev=110253&view=auto
==============================================================================
--- cfe/trunk/lib/Headers/avxintrin.h (added)
+++ cfe/trunk/lib/Headers/avxintrin.h Wed Aug  4 17:03:36 2010
@@ -0,0 +1,1158 @@
+/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AVXINTRIN_H
+#define __AVXINTRIN_H
+
+#ifndef __AVX__
+#error "AVX instruction set not enabled"
+#else
+
+typedef double __v4df __attribute__ ((__vector_size__ (32)));
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+typedef int __v8si __attribute__ ((__vector_size__ (32)));
+typedef short __v16hi __attribute__ ((__vector_size__ (32)));
+typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32)));
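+
+/* Editorial note (not part of the original commit): the __v* typedefs are
+ * element-typed views used for casts into the __builtin_ia32_* builtins;
+ * __m256, __m256d and __m256i are the public vector types. */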
+
+/* Arithmetic */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_add_pd(__m256d a, __m256d b)
+{
+  return a+b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_add_ps(__m256 a, __m256 b)
+{
+  return a+b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_pd(__m256d a, __m256d b)
+{
+  return a-b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_ps(__m256 a, __m256 b)
+{
+  return a-b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_addsub_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_addsubpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_addsub_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_addsubps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_div_pd(__m256d a, __m256d b)
+{
+  return a / b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_div_ps(__m256 a, __m256 b)
+{
+  return a / b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_max_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_maxpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_max_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_maxps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_min_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_minpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_min_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_minps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_pd(__m256d a, __m256d b)
+{
+  return a * b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_ps(__m256 a, __m256 b)
+{
+  return a * b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_sqrt_pd(__m256d a)
+{
+  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_sqrt_ps(__m256 a)
+{
+  return (__m256)__builtin_ia32_sqrtps256((__v8sf)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_rsqrt_ps(__m256 a)
+{
+  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_rcp_ps(__m256 a)
+{
+  return (__m256)__builtin_ia32_rcpps256((__v8sf)a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_round_pd(__m256d v, const int m)
+{
+  return (__m256d)__builtin_ia32_roundpd256((__v4df)v, m);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_round_ps(__m256 v, const int m)
+{
+  return (__m256)__builtin_ia32_roundps256((__v8sf)v, m);
+}
+
+#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
+#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
+#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
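+
+/* Editorial note (not part of the original commit): _MM_FROUND_CEIL and
+ * _MM_FROUND_FLOOR are the SSE4.1 rounding-control macros defined in
+ * smmintrin.h. For example:
+ *   __m256d up = _mm256_ceil_pd(v);  // round each lane toward +infinity
+ */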
+
+/* Logical */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_and_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_andpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_and_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_andps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_andnpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_andnps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_or_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_orpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_or_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_orps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_xorpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_xorps256((__v8sf)a, (__v8sf)b);
+}
+
+/* Horizontal arithmetic */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_haddpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_haddps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_hsubpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_hsubps256((__v8sf)a, (__v8sf)b);
+}
+
+/* Vector permutations */
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_permutevar_pd(__m128d a, __m128i c)
+{
+  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)a, (__v2di)c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar_pd(__m256d a, __m256i c)
+{
+  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)a, (__v4di)c);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_permutevar_ps(__m128 a, __m128i c)
+{
+  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)a, (__v4si)c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar_ps(__m256 a, __m256i c)
+{
+  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)a, (__v8si)c);
+}
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_permute_pd(__m128d a, const int c)
+{
+  return (__m128d)__builtin_ia32_vpermilpd((__v2df)a, c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_permute_pd(__m256d a, const int c)
+{
+  return (__m256d)__builtin_ia32_vpermilpd256((__v4df)a, c);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_permute_ps(__m128 a, const int c)
+{
+  return (__m128)__builtin_ia32_vpermilps((__v4sf)a, c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permute_ps(__m256 a, const int c)
+{
+  return (__m256)__builtin_ia32_vpermilps256((__v8sf)a, c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_permute2f128_pd(__m256d a, __m256d b, const int c)
+{
+  return (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)a, (__v4df)b, c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permute2f128_ps(__m256 a, __m256 b, const int c)
+{
+  return (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)a, (__v8sf)b, c);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_permute2f128_si256(__m256i a, __m256i b, const int c)
+{
+  return (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)a, (__v8si)b, c);
+}
+
+/* Vector Blend */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_blend_pd(__m256d a, __m256d b, const int c)
+{
+  return (__m256d)__builtin_ia32_blendpd256((__v4df)a, (__v4df)b, c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_blend_ps(__m256 a, __m256 b, const int c)
+{
+  return (__m256)__builtin_ia32_blendps256((__v8sf)a, (__v8sf)b, c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_pd(__m256d a, __m256d b, __m256d c)
+{
+  return (__m256d)__builtin_ia32_blendvpd256((__v4df)a, (__v4df)b, (__v4df)c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_ps(__m256 a, __m256 b, __m256 c)
+{
+  return (__m256)__builtin_ia32_blendvps256((__v8sf)a, (__v8sf)b, (__v8sf)c);
+}
+
+/* Vector Dot Product */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_dp_ps(__m256 a, __m256 b, const int c)
+{
+  return (__m256)__builtin_ia32_dpps256((__v8sf)a, (__v8sf)b, c);
+}
+
+/* Vector shuffle */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_pd(__m256d a, __m256d b, const int s)
+{
+  return (__m256d)__builtin_ia32_shufpd256((__v4df)a, (__v4df)b, s);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_ps(__m256 a, __m256 b, const int s)
+{
+  return (__m256)__builtin_ia32_shufps256((__v8sf)a, (__v8sf)b, s);
+}
+
+/* Compare */
+#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
+#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling)  */
+#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling)  */
+#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling)  */
+#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling)  */
+#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling)  */
+#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling)  */
+#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling)  */
+#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling)  */
+#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unord, signaling)  */
+#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling)  */
+#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling)  */
+#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling)  */
+#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling)  */
+#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling)  */
+#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling)  */
+#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling)  */
+#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling)  */
+#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling)  */
+#define _CMP_UNORD_S  0x13 /* Unordered (signaling)  */
+#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling)  */
+#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling)  */
+#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unord, non-signaling)  */
+#define _CMP_ORD_S    0x17 /* Ordered (signaling)  */
+#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling)  */
+#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unord, non-sign)  */
+#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling)  */
+#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling)  */
+#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling)  */
+#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling)  */
+#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
+#define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmp_pd(__m128d a, __m128d b, const int c)
+{
+  return (__m128d)__builtin_ia32_cmppd((__v2df)a, (__v2df)b, c);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmp_ps(__m128 a, __m128 b, const int c)
+{
+  return (__m128)__builtin_ia32_cmpps((__v4sf)a, (__v4sf)b, c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_cmp_pd(__m256d a, __m256d b, const int c)
+{
+  return (__m256d)__builtin_ia32_cmppd256((__v4df)a, (__v4df)b, c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmp_ps(__m256 a, __m256 b, const int c)
+{
+  return (__m256)__builtin_ia32_cmpps256((__v8sf)a, (__v8sf)b, c);
+}
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmp_sd(__m128d a, __m128d b, const int c)
+{
+  return (__m128d)__builtin_ia32_cmpsd((__v2df)a, (__v2df)b, c);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmp_ss(__m128 a, __m128 b, const int c)
+{
+  return (__m128)__builtin_ia32_cmpss((__v4sf)a, (__v4sf)b, c);
+}
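+
+/* Editorial note (not part of the original commit): the predicate is
+ * passed as the immediate third argument, e.g.
+ *   __m256d lt = _mm256_cmp_pd(a, b, _CMP_LT_OS);
+ * sets a lane to all-ones where a < b and to all-zeros elsewhere. */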
+
+/* Vector extract */
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm256_extractf128_pd(__m256d a, const int o)
+{
+  return (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)a, o);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm256_extractf128_ps(__m256 a, const int o)
+{
+  return (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)a, o);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_extractf128_si256(__m256i a, const int o)
+{
+  return (__m128i)__builtin_ia32_vextractf128_si256((__v8si)a, o);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi32(__m256i a, int const imm)
+{
+  __v8si b = (__v8si)a;
+  return b[imm];
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi16(__m256i a, int const imm)
+{
+  __v16hi b = (__v16hi)a;
+  return b[imm];
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi8(__m256i a, int const imm)
+{
+  __v32qi b = (__v32qi)a;
+  return b[imm];
+}
+
+#ifdef __x86_64__
+static __inline long long  __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi64(__m256i a, const int imm)
+{
+  __v4di b = (__v4di)a;
+  return b[imm];
+}
+#endif
+
+/* Vector insert */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_pd(__m256d a, __m128d b, const int o)
+{
+  return (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)a, (__v2df)b, o);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_ps(__m256 a, __m128 b, const int o)
+{
+  return (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)a, (__v4sf)b, o);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_si256(__m256i a, __m128i b, const int o)
+{
+  return (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)a, (__v4si)b, o);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi32(__m256i a, int b, int const imm)
+{
+  __v8si c = (__v8si)a;
+  c[imm & 7] = b;
+  return (__m256i)c;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi16(__m256i a, int b, int const imm)
+{
+  __v16hi c = (__v16hi)a;
+  c[imm & 15] = b;
+  return (__m256i)c;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi8(__m256i a, int b, int const imm)
+{
+  __v32qi c = (__v32qi)a;
+  c[imm & 31] = b;
+  return (__m256i)c;
+}
+
+#ifdef __x86_64__
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi64(__m256i a, long long b, int const imm)
+{
+  __v4di c = (__v4di)a;
+  c[imm & 3] = b;
+  return (__m256i)c;
+}
+#endif
+
+/* Conversion */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_pd(__m128i a)
+{
+  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_ps(__m256i a)
+{
+  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) a);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtpd_ps(__m256d a)
+{
+  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) a);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtps_epi32(__m256 a)
+{
+  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtps_pd(__m128 a)
+{
+  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) a);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvttpd_epi32(__m256d a)
+{
+  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) a);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtpd_epi32(__m256d a)
+{
+  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) a);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvttps_epi32(__m256 a)
+{
+  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) a);
+}
+
+/* Vector replicate */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_movehdup_ps(__m256 a)
+{
+  return (__m256)__builtin_ia32_movshdup256((__v8sf)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_moveldup_ps(__m256 a)
+{
+  return (__m256)__builtin_ia32_movsldup256((__v8sf)a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_movedup_pd(__m256d a)
+{
+  return (__m256d)__builtin_ia32_movddup256((__v4df)a);
+}
+
+/* Unpack and Interleave */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_unpckhpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_pd(__m256d a, __m256d b)
+{
+  return (__m256d)__builtin_ia32_unpcklpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_unpckhps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_unpcklps256((__v8sf)a, (__v8sf)b);
+}
+
+/* Bit Test */
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testz_pd(__m128d a, __m128d b)
+{
+  return __builtin_ia32_vtestzpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testc_pd(__m128d a, __m128d b)
+{
+  return __builtin_ia32_vtestcpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testnzc_pd(__m128d a, __m128d b)
+{
+  return __builtin_ia32_vtestnzcpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testz_ps(__m128 a, __m128 b)
+{
+  return __builtin_ia32_vtestzps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testc_ps(__m128 a, __m128 b)
+{
+  return __builtin_ia32_vtestcps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testnzc_ps(__m128 a, __m128 b)
+{
+  return __builtin_ia32_vtestnzcps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_pd(__m256d a, __m256d b)
+{
+  return __builtin_ia32_vtestzpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_pd(__m256d a, __m256d b)
+{
+  return __builtin_ia32_vtestcpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_pd(__m256d a, __m256d b)
+{
+  return __builtin_ia32_vtestnzcpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_ps(__m256 a, __m256 b)
+{
+  return __builtin_ia32_vtestzps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_ps(__m256 a, __m256 b)
+{
+  return __builtin_ia32_vtestcps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_ps(__m256 a, __m256 b)
+{
+  return __builtin_ia32_vtestnzcps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_si256(__m256i a, __m256i b)
+{
+  return __builtin_ia32_ptestz256((__v4di)a, (__v4di)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_si256(__m256i a, __m256i b)
+{
+  return __builtin_ia32_ptestc256((__v4di)a, (__v4di)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_si256(__m256i a, __m256i b)
+{
+  return __builtin_ia32_ptestnzc256((__v4di)a, (__v4di)b);
+}
+
+/* Vector extract sign mask */
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_pd(__m256d a)
+{
+  return __builtin_ia32_movmskpd256((__v4df)a);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_ps(__m256 a)
+{
+  return __builtin_ia32_movmskps256((__v8sf)a);
+}
+
+/* Vector zero */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_zeroall(void)
+{
+  __builtin_ia32_vzeroall();
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_zeroupper(void)
+{
+  __builtin_ia32_vzeroupper();
+}
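+
+/* Editorial note (not part of the original commit): vzeroupper is
+ * conventionally executed before calling legacy SSE code to avoid the
+ * AVX-to-SSE transition penalty; vzeroall clears the full 256 bits of
+ * every ymm register rather than just the upper halves. */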
+
+/* Vector load with broadcast */
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcast_ss(float const *a)
+{
+  return (__m128)__builtin_ia32_vbroadcastss(a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_sd(double const *a)
+{
+  return (__m256d)__builtin_ia32_vbroadcastsd256(a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_ss(float const *a)
+{
+  return (__m256)__builtin_ia32_vbroadcastss256(a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_pd(__m128d const *a)
+{
+  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_ps(__m128 const *a)
+{
+  return (__m256)__builtin_ia32_vbroadcastf128_ps256(a);
+}
+
+/* SIMD load ops */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_load_pd(double const *p)
+{
+  return *(__m256d *)p;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_load_ps(float const *p)
+{
+  return *(__m256 *)p;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_pd(double const *p)
+{
+  return (__m256d)__builtin_ia32_loadupd256(p);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_ps(float const *p)
+{
+  return (__m256)__builtin_ia32_loadups256(p);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_load_si256(__m256i const *p)
+{
+  return *p;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_si256(__m256i const *p)
+{
+  return (__m256i)__builtin_ia32_loaddqu256((char const *)p);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_lddqu_si256(__m256i const *p)
+{
+  return (__m256i)__builtin_ia32_lddqu256((char const *)p);
+}
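+
+/* Editorial note (not part of the original commit): the aligned forms
+ * (_mm256_load_pd/_mm256_load_ps/_mm256_load_si256) require 32-byte
+ * aligned pointers; the loadu/lddqu forms accept any alignment, e.g.
+ *   double buf[4] __attribute__((aligned(32)));
+ *   __m256d v = _mm256_load_pd(buf);  // ok: buf is 32-byte aligned
+ */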
+
+/* SIMD store ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_pd(double *p, __m256d a)
+{
+  *(__m256d *)p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_ps(float *p, __m256 a)
+{
+  *(__m256 *)p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_pd(double *p, __m256d a)
+{
+  __builtin_ia32_storeupd256(p, (__v4df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_ps(float *p, __m256 a)
+{
+  __builtin_ia32_storeups256(p, (__v8sf)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_si256(__m256i *p, __m256i a)
+{
+  *p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_si256(__m256i *p, __m256i a)
+{
+  __builtin_ia32_storedqu256((char *)p, (__v32qi)a);
+}
+
+/* Conditional load ops */
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_pd(double const *p, __m128d m)
+{
+  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)p, (__v2df)m);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_pd(double const *p, __m256d m)
+{
+  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)p, (__v4df)m);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_ps(float const *p, __m128 m)
+{
+  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)p, (__v4sf)m);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_ps(float const *p, __m256 m)
+{
+  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)p, (__v8sf)m);
+}
+
+/* Conditional store ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_ps(float *p, __m256 m, __m256 a)
+{
+  __builtin_ia32_maskstoreps256((__v8sf *)p, (__v8sf)m, (__v8sf)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_pd(double *p, __m128d m, __m128d a)
+{
+  __builtin_ia32_maskstorepd((__v2df *)p, (__v2df)m, (__v2df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_pd(double *p, __m256d m, __m256d a)
+{
+  __builtin_ia32_maskstorepd256((__v4df *)p, (__v4df)m, (__v4df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_ps(float *p, __m128 m, __m128 a)
+{
+  __builtin_ia32_maskstoreps((__v4sf *)p, (__v4sf)m, (__v4sf)a);
+}
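+
+/* Editorial note (not part of the original commit): the maskload and
+ * maskstore operations consult only the most significant bit of each
+ * mask element; lanes whose mask bit is clear read as zero on load and
+ * are left untouched in memory on store. */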
+
+/* Cacheability support ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_si256(__m256i *a, __m256i b)
+{
+  __builtin_ia32_movntdq256((__v4di *)a, (__v4di)b);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_pd(double *a, __m256d b)
+{
+  __builtin_ia32_movntpd256(a, (__v4df)b);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_ps(float *p, __m256 a)
+{
+  __builtin_ia32_movntps256(p, (__v8sf)a);
+}
+
+/* Create vectors */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_set_pd(double a, double b, double c, double d)
+{
+  return (__m256d){ d, c, b, a };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_set_ps(float a, float b, float c, float d,
+              float e, float f, float g, float h)
+{
+  return (__m256){ h, g, f, e, d, c, b, a };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi32(int i0, int i1, int i2, int i3,
+                 int i4, int i5, int i6, int i7)
+{
+  return (__m256i)(__v8si){ i7, i6, i5, i4, i3, i2, i1, i0 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi16(short w15, short w14, short w13, short w12,
+                 short w11, short w10, short w09, short w08,
+                 short w07, short w06, short w05, short w04,
+                 short w03, short w02, short w01, short w00)
+{
+  return (__m256i)(__v16hi){ w00, w01, w02, w03, w04, w05, w06, w07,
+                             w08, w09, w10, w11, w12, w13, w14, w15 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi8(char b31, char b30, char b29, char b28,
+                char b27, char b26, char b25, char b24,
+                char b23, char b22, char b21, char b20,
+                char b19, char b18, char b17, char b16,
+                char b15, char b14, char b13, char b12,
+                char b11, char b10, char b09, char b08,
+                char b07, char b06, char b05, char b04,
+                char b03, char b02, char b01, char b00)
+{
+  return (__m256i)(__v32qi){
+    b00, b01, b02, b03, b04, b05, b06, b07,
+    b08, b09, b10, b11, b12, b13, b14, b15,
+    b16, b17, b18, b19, b20, b21, b22, b23,
+    b24, b25, b26, b27, b28, b29, b30, b31
+  };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi64x(long long a, long long b, long long c, long long d)
+{
+  return (__m256i)(__v4di){ d, c, b, a };
+}
+
+/* Create vectors with elements in reverse order */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_pd(double a, double b, double c, double d)
+{
+  return (__m256d){ a, b, c, d };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_ps(float a, float b, float c, float d,
+               float e, float f, float g, float h)
+{
+  return (__m256){ a, b, c, d, e, f, g, h };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi32(int i0, int i1, int i2, int i3,
+                  int i4, int i5, int i6, int i7)
+{
+  return (__m256i)(__v8si){ i0, i1, i2, i3, i4, i5, i6, i7 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi16(short w15, short w14, short w13, short w12,
+                  short w11, short w10, short w09, short w08,
+                  short w07, short w06, short w05, short w04,
+                  short w03, short w02, short w01, short w00)
+{
+  return (__m256i)(__v16hi){ w15, w14, w13, w12, w11, w10, w09, w08,
+                             w07, w06, w05, w04, w03, w02, w01, w00 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi8(char b31, char b30, char b29, char b28,
+                 char b27, char b26, char b25, char b24,
+                 char b23, char b22, char b21, char b20,
+                 char b19, char b18, char b17, char b16,
+                 char b15, char b14, char b13, char b12,
+                 char b11, char b10, char b09, char b08,
+                 char b07, char b06, char b05, char b04,
+                 char b03, char b02, char b01, char b00)
+{
+  return (__m256i)(__v32qi){
+    b31, b30, b29, b28, b27, b26, b25, b24,
+    b23, b22, b21, b20, b19, b18, b17, b16,
+    b15, b14, b13, b12, b11, b10, b09, b08,
+    b07, b06, b05, b04, b03, b02, b01, b00 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi64x(long long a, long long b, long long c, long long d)
+{
+  return (__m256i)(__v4di){ a, b, c, d };
+}
+
+/* Create vectors with repeated elements */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_pd(double w)
+{
+  return (__m256d){ w, w, w, w };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_ps(float w)
+{
+  return (__m256){ w, w, w, w, w, w, w, w };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi32(int i)
+{
+  return (__m256i)(__v8si){ i, i, i, i, i, i, i, i };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi16(short w)
+{
+  return (__m256i)(__v16hi){ w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi8(char b)
+{
+  return (__m256i)(__v32qi){ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,
+                             b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi64x(long long q)
+{
+  return (__m256i)(__v4di){ q, q, q, q };
+}
+
+/* Create zeroed vectors */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_pd(void)
+{
+  return (__m256d){ 0, 0, 0, 0 };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_ps(void)
+{
+  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_si256(void)
+{
+  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+}
+
+/* Cast between vector types */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd_ps(__m256d in)
+{
+  return (__m256)in;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd_si256(__m256d in)
+{
+  return (__m256i)in;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castps_pd(__m256 in)
+{
+  return (__m256d)in;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castps_si256(__m256 in)
+{
+  return (__m256i)in;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_ps(__m256i in)
+{
+  return (__m256)in;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_pd(__m256i in)
+{
+  return (__m256d)in;
+}
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd256_pd128(__m256d in)
+{
+  return (__m128d)__builtin_ia32_pd_pd256((__v4df)in);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm256_castps256_ps128(__m256 in)
+{
+  return (__m128)__builtin_ia32_ps_ps256((__v8sf)in);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_si128(__m256i in)
+{
+  return (__m128i)__builtin_ia32_si_si256((__v8si)in);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd128_pd256(__m128d in)
+{
+  return (__m256d)__builtin_ia32_pd256_pd((__v2df)in);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castps128_ps256(__m128 in)
+{
+  return (__m256)__builtin_ia32_ps256_ps((__v4sf)in);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi128_si256(__m128i in)
+{
+  return (__m256i)__builtin_ia32_si256_si((__v4si)in);
+}
+
+#endif /* __AVX__ */
+
+#endif /* __AVXINTRIN_H */

Modified: cfe/trunk/test/Headers/x86-intrinsics-headers.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Headers/x86-intrinsics-headers.c?rev=110253&r1=110252&r2=110253&view=diff
==============================================================================
--- cfe/trunk/test/Headers/x86-intrinsics-headers.c (original)
+++ cfe/trunk/test/Headers/x86-intrinsics-headers.c Wed Aug  4 17:03:36 2010
@@ -29,4 +29,8 @@
 #include <nmmintrin.h>
 #  endif
 
+#  if defined(__AVX__)
+#include <avxintrin.h>
+#  endif
+
 #endif
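
(Editorial note: the new include is only exercised when __AVX__ is
defined, so the test must be built with AVX enabled; with the clang
driver that would look something like
"clang -mavx -fsyntax-only test/Headers/x86-intrinsics-headers.c".
The exact invocation is an assumption, not part of this commit.)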




