r219137 - [PATCH][Power] Fix (and deprecate) vec_lvsl and vec_lvsr for little endian

Bill Schmidt wschmidt at linux.vnet.ibm.com
Mon Oct 6 12:02:20 PDT 2014


Author: wschmidt
Date: Mon Oct  6 14:02:20 2014
New Revision: 219137

URL: http://llvm.org/viewvc/llvm-project?rev=219137&view=rev
Log:
[PATCH][Power] Fix (and deprecate) vec_lvsl and vec_lvsr for little endian

Use of the vec_lvsl and vec_lvsr interfaces is discouraged for little
endian targets, since Power8 hardware is a minimum requirement there,
and Power8 provides reasonable performance for unaligned vector loads
and stores.  Until now we have not provided "correct" (i.e.,
big-endian-compatible) code generation for these interfaces, because
doing so produces poorly performing code.  However, this has become
the source of too many questions.
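
For context, here is a minimal sketch of the old and new idioms.  The
function names are hypothetical, and the replacement form relies on
the compiler lowering a plain vector assignment to an efficient
unaligned VSX load on Power8, as the deprecation message suggests:

    #include <altivec.h>

    /* Classic AltiVec idiom, now deprecated on PPC64LE: derive a
       permute mask from the address and merge two aligned loads. */
    vector float load_unaligned_old(const float *p)
    {
      vector unsigned char mask = vec_lvsl(0, p);
      vector float lo = vec_ld(0, p);   /* aligned load at or below p */
      vector float hi = vec_ld(15, p);  /* aligned load covering p+15 */
      return vec_perm(lo, hi, mask);
    }

    /* Recommended replacement on Power8: a simple assignment, for
       which the compiler emits an unaligned vector load directly. */
    vector float load_unaligned_new(const float *p)
    {
      return *(const vector float *)p;
    }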

With this patch, LLVM will now produce compatible code for these
interfaces, but will also produce a deprecation warning message for
PPC64LE when one of them is used.  This should make the porting direction
clearer to programmers.  A similar patch has recently been committed to
GCC.
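
Concretely, each little endian variant wraps the raw builtin result in
an element reversal, which compensates for the little endian register
layout so that the usual lvsl/lvsr-plus-vec_perm idiom now matches big
endian results.  A sketch of the pattern repeated throughout the
header (the helper name is hypothetical):

    static vector unsigned char lvsl_le_compatible(int a, const float *b)
    {
      /* Byte i of the raw lvsl result holds ((address & 0xF) + i),
         numbered in big endian element order. */
      vector unsigned char mask =
        (vector unsigned char)__builtin_altivec_lvsl(a, b);
      /* Reverse the elements so callers indexing the result, or
         feeding it to vec_perm, observe big-endian-compatible values. */
      vector unsigned char reverse =
        {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
      return vec_perm(mask, mask, reverse);
    }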

This patch includes a test for the warning message.  There is a companion
patch that adds two unit tests to projects/test-suite.

Added:
    cfe/trunk/test/Headers/altivec-intrin.c
Modified:
    cfe/trunk/lib/Headers/altivec.h

Modified: cfe/trunk/lib/Headers/altivec.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/altivec.h?rev=219137&r1=219136&r2=219137&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/altivec.h (original)
+++ cfe/trunk/lib/Headers/altivec.h Mon Oct  6 14:02:20 2014
@@ -2253,91 +2253,273 @@ vec_vlogefp(vector float __a)
 
 /* vec_lvsl */
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const signed char *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const signed char *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned char *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const unsigned char *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const short *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const short *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned short *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const unsigned short *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const int *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const int *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned int *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const unsigned int *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const float *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsl(int __a, const float *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
 }
+#endif
 
 /* vec_lvsr */
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const signed char *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const signed char *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned char *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const unsigned char *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const short *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const short *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned short *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const unsigned short *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const int *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const int *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned int *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const unsigned int *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const float *__b)
+{
+  vector unsigned char mask =
+    (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+  vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+  return vec_perm(mask, mask, reverse);
+}
+#else
 static vector unsigned char __ATTRS_o_ai
 vec_lvsr(int __a, const float *__b)
 {
   return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
 }
+#endif
 
 /* vec_madd */
 

Added: cfe/trunk/test/Headers/altivec-intrin.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Headers/altivec-intrin.c?rev=219137&view=auto
==============================================================================
--- cfe/trunk/test/Headers/altivec-intrin.c (added)
+++ cfe/trunk/test/Headers/altivec-intrin.c Mon Oct  6 14:02:20 2014
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-cpu power8 \
+// RUN:     -faltivec -verify %s
+
+// Test special behavior of Altivec intrinsics in this file.
+
+#include <altivec.h>
+
+__attribute__((__aligned__(16))) float x[20];
+
+int main()
+{
+  vector unsigned char l = vec_lvsl (0, &x[1]); // expected-warning {{is deprecated: use assignment for unaligned little endian loads/stores}}
+  vector unsigned char r = vec_lvsr (0, &x[1]); // expected-warning {{is deprecated: use assignment for unaligned little endian loads/stores}}
+}
+// FIXME: As noted in ms-intrin.cpp, it would be nice if we didn't have to
+// hard-code the line number from altivec.h here.
+// expected-note@altivec.h:2374 {{deprecated here}}
+// expected-note@altivec.h:2509 {{deprecated here}}
