r346471 - [PowerPC] [Clang] [AltiVec] The second parameter of the vec_sr function should be taken modulo the number of bits in the element

Zi Xuan Wu via cfe-commits cfe-commits at lists.llvm.org
Thu Nov 8 19:35:32 PST 2018


Author: wuzish
Date: Thu Nov  8 19:35:32 2018
New Revision: 346471

URL: http://llvm.org/viewvc/llvm-project?rev=346471&view=rev
Log:
[PowerPC] [Clang] [AltiVec] The second parameter of the vec_sr function should be taken modulo the number of bits in the element

The second parameter of the vec_sr function represents the number of bits to shift, and it should be taken modulo the number of bits in the element, matching what vec_sl already does.
This is required by the ABI:

Each element of the result vector is the result of logically right shifting the corresponding
element of ARG1 by the number of bits specified by the value of the corresponding
element of ARG2, modulo the number of bits in the element. The bits that are shifted out
are replaced by zeros.

Differential Revision: https://reviews.llvm.org/D54087
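
As a scalar model of the rule quoted above (an illustrative sketch only, not part of the commit; the helper name is made up), the shift count is reduced modulo the element width before the logical right shift is applied:

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar model of the per-element semantics the ABI requires: reduce the
       shift count modulo the element width, then shift right logically.
       The real vec_sr applies this lane-wise to vector operands. */
    static uint8_t model_vec_sr_u8(uint8_t elem, uint8_t shift) {
      return (uint8_t)(elem >> (shift % 8)); /* 8 bits per i8 element */
    }

    int main(void) {
      /* A shift count of 9 is reduced to 1, so 0x80 >> 9 yields 0x40, not 0. */
      printf("0x%02x\n", model_vec_sr_u8(0x80, 9));
      return 0;
    }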


Modified:
    cfe/trunk/lib/Headers/altivec.h
    cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
    cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c

Modified: cfe/trunk/lib/Headers/altivec.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/altivec.h?rev=346471&r1=346470&r2=346471&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/altivec.h (original)
+++ cfe/trunk/lib/Headers/altivec.h Thu Nov  8 19:35:32 2018
@@ -9492,49 +9492,51 @@ vec_splat_u32(signed char __a) {
 
 /* vec_sr */
 
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
-  vector unsigned char __res = (vector unsigned char)__a >> __b;
-  return (vector signed char)__res;
-}
-
+// vec_sr does modulo arithmetic on __b first, so __b is allowed to be more
+// than the length of __a.
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_sr(vector unsigned char __a, vector unsigned char __b) {
-  return __a >> __b;
+  return __a >>
+         (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
 }
 
-static __inline__ vector signed short __ATTRS_o_ai
-vec_sr(vector signed short __a, vector unsigned short __b) {
-  vector unsigned short __res = (vector unsigned short)__a >> __b;
-  return (vector signed short)__res;
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sr(vector signed char __a, vector unsigned char __b) {
+  return (vector signed char)vec_sr((vector unsigned char)__a, __b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_sr(vector unsigned short __a, vector unsigned short __b) {
-  return __a >> __b;
+  return __a >>
+         (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
 }
 
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sr(vector signed int __a, vector unsigned int __b) {
-  vector unsigned int __res = (vector unsigned int)__a >> __b;
-  return (vector signed int)__res;
+static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
+                                                   vector unsigned short __b) {
+  return (vector short)vec_sr((vector unsigned short)__a, __b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_sr(vector unsigned int __a, vector unsigned int __b) {
-  return __a >> __b;
+  return __a >>
+         (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
 }
 
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sr(vector signed long long __a, vector unsigned long long __b) {
-  vector unsigned long long __res = (vector unsigned long long)__a >> __b;
-  return (vector signed long long)__res;
+static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
+                                                 vector unsigned int __b) {
+  return (vector int)vec_sr((vector unsigned int)__a, __b);
 }
 
+#ifdef __POWER8_VECTOR__
 static __inline__ vector unsigned long long __ATTRS_o_ai
 vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a >> __b;
+  return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
+                                                   __CHAR_BIT__));
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_sr(vector long long __a, vector unsigned long long __b) {
+  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
 }
 #endif
 
@@ -9544,12 +9546,12 @@ vec_sr(vector unsigned long long __a, ve
 
 static __inline__ vector signed char __ATTRS_o_ai
 vec_vsrb(vector signed char __a, vector unsigned char __b) {
-  return __a >> (vector signed char)__b;
+  return vec_sr(__a, __b);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
-  return __a >> __b;
+  return vec_sr(__a, __b);
 }
 
 /* vec_vsrh */
@@ -9558,12 +9560,12 @@ vec_vsrb(vector unsigned char __a, vecto
 
 static __inline__ vector short __ATTRS_o_ai
 vec_vsrh(vector short __a, vector unsigned short __b) {
-  return __a >> (vector short)__b;
+  return vec_sr(__a, __b);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
-  return __a >> __b;
+  return vec_sr(__a, __b);
 }
 
 /* vec_vsrw */
@@ -9572,12 +9574,12 @@ vec_vsrh(vector unsigned short __a, vect
 
 static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
                                                    vector unsigned int __b) {
-  return __a >> (vector int)__b;
+  return vec_sr(__a, __b);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
 vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
-  return __a >> __b;
+  return vec_sr(__a, __b);
 }
 
 /* vec_sra */

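For reference, a minimal usage sketch of the updated overloads (illustrative file and function names; assumes an AltiVec-enabled build such as clang -maltivec):

    /* sr_demo.c - illustrative only; build with: clang -maltivec -c sr_demo.c */
    #include <altivec.h>

    vector unsigned int demo(vector unsigned int v) {
      /* With the fixed vec_sr, each lane is shifted by 33 % 32 == 1,
         per the ABI's "modulo the number of bits in the element" rule. */
      vector unsigned int shift = (vector unsigned int){33, 33, 33, 33};
      return vec_sr(v, shift);
    }
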
Modified: cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-altivec.c?rev=346471&r1=346470&r2=346471&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-altivec.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-altivec.c Thu Nov  8 19:35:32 2018
@@ -4256,52 +4256,76 @@ void test6() {
 
   /* vec_sr */
   res_vsc = vec_sr(vsc, vuc);
-// CHECK: lshr <16 x i8>
-// CHECK-LE: lshr <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vuc = vec_sr(vuc, vuc);
-// CHECK: lshr <16 x i8>
-// CHECK-LE: lshr <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vs  = vec_sr(vs, vus);
-// CHECK: lshr <8 x i16>
-// CHECK-LE: lshr <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vus = vec_sr(vus, vus);
-// CHECK: lshr <8 x i16>
-// CHECK-LE: lshr <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vi  = vec_sr(vi, vui);
-// CHECK: lshr <4 x i32>
-// CHECK-LE: lshr <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vui = vec_sr(vui, vui);
-// CHECK: lshr <4 x i32>
-// CHECK-LE: lshr <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vsc = vec_vsrb(vsc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vuc = vec_vsrb(vuc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <16 x i8> {{[0-9a-zA-Z%.]+}}, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+// CHECK-LE: lshr <16 x i8> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vs  = vec_vsrh(vs, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vus = vec_vsrh(vus, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <8 x i16> {{[0-9a-zA-Z%.]+}}, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+// CHECK-LE: lshr <8 x i16> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vi  = vec_vsrw(vi, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   res_vui = vec_vsrw(vui, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <4 x i32> {{[0-9a-zA-Z%.]+}}, <i32 32, i32 32, i32 32, i32 32>
+// CHECK-LE: lshr <4 x i32> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 
   /* vec_sra */
   res_vsc = vec_sra(vsc, vuc);

Modified: cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c?rev=346471&r1=346470&r2=346471&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c Thu Nov  8 19:35:32 2018
@@ -1066,13 +1066,17 @@ void test1() {
 
   /* vec_sr */
   res_vsll = vec_sr(vsll, vull);
-// CHECK: lshr <2 x i64>
-// CHECK-LE: lshr <2 x i64>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
+// CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
+// CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 // CHECK-PPC: error: call to 'vec_sr' is ambiguous
 
   res_vull = vec_sr(vull, vull);
-// CHECK: lshr <2 x i64>
-// CHECK-LE: lshr <2 x i64>
+// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
+// CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
+// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
+// CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
 // CHECK-PPC: error: call to 'vec_sr' is ambiguous
 
   /* vec_sra */

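The long long overloads are only available under __POWER8_VECTOR__, which is what the p8vector test exercises. A usage sketch for the 64-bit case (illustrative names; assumes a Power8 build such as clang -mcpu=pwr8 -maltivec):

    /* sr_demo64.c - illustrative only; build with: clang -mcpu=pwr8 -maltivec -c sr_demo64.c */
    #include <altivec.h>

    vector unsigned long long demo64(vector unsigned long long v) {
      /* 65 % 64 == 1: each doubleword is logically shifted right by one bit. */
      vector unsigned long long shift = (vector unsigned long long){65, 65};
      return vec_sr(v, shift);
    }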


