[cfe-commits] r61330 - in /cfe/trunk: include/clang/AST/X86Builtins.def lib/Headers/xmmintrin.devel.h

Anders Carlsson andersca at mac.com
Sun Dec 21 23:08:20 PST 2008


Author: andersca
Date: Mon Dec 22 01:08:03 2008
New Revision: 61330

URL: http://llvm.org/viewvc/llvm-project?rev=61330&view=rev
Log:
Implement most of the remaining intrinsics; _mm_insert_pi16 is the only one left now.

Modified:
    cfe/trunk/include/clang/AST/X86Builtins.def
    cfe/trunk/lib/Headers/xmmintrin.devel.h

Modified: cfe/trunk/include/clang/AST/X86Builtins.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/AST/X86Builtins.def?rev=61330&r1=61329&r2=61330&view=diff

==============================================================================
--- cfe/trunk/include/clang/AST/X86Builtins.def (original)
+++ cfe/trunk/include/clang/AST/X86Builtins.def Mon Dec 22 01:08:03 2008
@@ -293,12 +293,7 @@
 BUILTIN(__builtin_ia32_movmskps, "iV4f", "")
 BUILTIN(__builtin_ia32_pmovmskb, "iV8c", "")
 BUILTIN(__builtin_ia32_movntps, "vf*V4f", "")
-// FIXME: the prototype for __builtin_ia32_movntq changed across different
-// versions of GCC.  Until we can replace GCC's xmmintrin.h, this is hacked to
-// be a vararg builtin instead of taking V1LLi like it should.  This loses some
-// type checking but makes us compatible with old version of GCC's xmmintrin.h
-// file.
-BUILTIN(__builtin_ia32_movntq, "vV1LLi*.", "")
+BUILTIN(__builtin_ia32_movntq, "vV1LLi*V1LLi", "")
 BUILTIN(__builtin_ia32_sfence, "v", "")
 BUILTIN(__builtin_ia32_psadbw, "V4sV8cV8c", "")
 BUILTIN(__builtin_ia32_rcpps, "V4fV4f", "")

Modified: cfe/trunk/lib/Headers/xmmintrin.devel.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/xmmintrin.devel.h?rev=61330&r1=61329&r2=61330&view=diff

==============================================================================
--- cfe/trunk/lib/Headers/xmmintrin.devel.h (original)
+++ cfe/trunk/lib/Headers/xmmintrin.devel.h Mon Dec 22 01:08:03 2008
@@ -371,52 +371,9 @@
   return __builtin_ia32_cvtpi2ps(a, (__v2si)b);
 }
 
-static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi16_ps(__m64 a)
-{
-  /* FIXME: Implement */
-  return (__m128){ 0, 0, 0, 0 };
-}
-
-static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu16_ps(__m64 a)
-{
-  /* FIXME: Implement */
-  return (__m128){ 0, 0, 0, 0 };  
-}
-
-static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi8_ps(__m64 a)
-{
-  /* FIXME: Implement */
-  return (__m128){ 0, 0, 0, 0 };  
-}
-
-static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu8_ps(__m64 a)
-{
-  /* FIXME: Implement */
-  return (__m128){ 0, 0, 0, 0 };  
-}
-
-static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi32x2_ps(__m64 a, __m64 b)
-{
-  /* FIXME: Implement */
-  return (__m128){ 0, 0, 0, 0 };  
-}
-
-static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi16(__m128 a)
-{
-  /* FIXME: Implement */
-  return _mm_setzero_si64();
-}
-
-static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi8(__m128 a)
-{
-  /* FIXME: Implement */
-  return _mm_setzero_si64();
-}
-
 static inline float __attribute__((__always_inline__)) _mm_cvtss_f32(__m128 a)
 {
-  /* FIXME: Implement */
-  return 0;
+  return a[0];
 }
 
 static inline __m128 __attribute__((__always_inline__)) _mm_loadh_pi(__m128 a, __m64 const *p)
@@ -651,6 +608,92 @@
   return __builtin_shufflevector(a, b, 0, 1, 4, 5);
 }
 
+static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi16_ps(__m64 a)
+{
+  __m64 b, c;
+  __m128 r;
+
+  b = _mm_setzero_si64();
+  b = _mm_cmpgt_pi16(b, a);
+  c = _mm_unpackhi_pi16(a, b);  
+  r = _mm_setzero_ps();
+  r = _mm_cvtpi32_ps(r, c);
+  r = _mm_movelh_ps(r, r);
+  c = _mm_unpacklo_pi16(a, b);  
+  r = _mm_cvtpi32_ps(r, c);
+
+  return r;
+}
+
+static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu16_ps(__m64 a)
+{
+  __m64 b, c;
+  __m128 r;
+
+  b = _mm_setzero_si64();
+  c = _mm_unpackhi_pi16(a, b);  
+  r = _mm_setzero_ps();
+  r = _mm_cvtpi32_ps(r, c);
+  r = _mm_movelh_ps(r, r);
+  c = _mm_unpacklo_pi16(a, b);  
+  r = _mm_cvtpi32_ps(r, c);
+
+  return r;
+}
+
+static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi8_ps(__m64 a)
+{
+  __m64 b;
+  
+  b = _mm_setzero_si64();
+  b = _mm_cmpgt_pi8(b, a);
+  b = _mm_unpacklo_pi8(a, b);
+
+  return _mm_cvtpi16_ps(b);
+}
+
+static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu8_ps(__m64 a)
+{
+  __m64 b;
+  
+  b = _mm_setzero_si64();
+  b = _mm_unpacklo_pi8(a, b);
+
+  return _mm_cvtpi16_ps(b);
+}
+
+static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi32x2_ps(__m64 a, __m64 b)
+{
+  __m128 c;
+  
+  c = _mm_setzero_ps();  
+  c = _mm_cvtpi32_ps(c, b);
+  c = _mm_movelh_ps(c, c);
+
+  return _mm_cvtpi32_ps(c, a);
+}
+
+static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi16(__m128 a)
+{
+  __m64 b, c;
+  
+  b = _mm_cvtps_pi32(a);
+  a = _mm_movehl_ps(a, a);
+  c = _mm_cvtps_pi32(a);
+  
+  return _mm_packs_pi16(b, c);
+}
+
+static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi8(__m128 a)
+{
+  __m64 b, c;
+  
+  b = _mm_cvtps_pi16(a);
+  c = _mm_setzero_si64();
+  
+  return _mm_packs_pi16(b, c);
+}
+
 static inline int __attribute__((__always_inline__)) _mm_movemask_ps(__m128 a)
 {
   return __builtin_ia32_movmskps(a);





More information about the cfe-commits mailing list