[clang] 070e402 - [PowerPC][Altivec] Emit correct builtin for single precision vec_all_ne

Nemanja Ivanovic via cfe-commits <cfe-commits at lists.llvm.org>
Thu Nov 7 18:41:55 PST 2019


Author: Nemanja Ivanovic
Date: 2019-11-07T20:40:32-06:00
New Revision: 070e4027b02453f0962e5b61335a517581c5528f

URL: https://github.com/llvm/llvm-project/commit/070e4027b02453f0962e5b61335a517581c5528f
DIFF: https://github.com/llvm/llvm-project/commit/070e4027b02453f0962e5b61335a517581c5528f.diff

LOG: [PowerPC][Altivec] Emit correct builtin for single precision vec_all_ne

For the vector float overload, we currently emit the double precision comparison
builtin (__builtin_vsx_xvcmpeqdp_p), whereas we need to emit the single precision
version (__builtin_vsx_xvcmpeqsp_p).

Differential revision: https://reviews.llvm.org/D64024
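
For context, here is a minimal usage sketch (not part of the commit; the helper
name is illustrative). Built for a PowerPC target with -maltivec -mvsx, the
vector float overload of vec_all_ne should now lower to
@llvm.ppc.vsx.xvcmpeqsp.p rather than the double precision
@llvm.ppc.vsx.xvcmpeqdp.p:

#include <altivec.h>

/* Returns 1 only if every lane of a differs from the corresponding lane
   of b ("all not-equal"). */
int all_lanes_differ(vector float a, vector float b) {
  /* With this patch, the header compares with the single precision builtin
     __builtin_vsx_xvcmpeqsp_p (predicated on __CR6_EQ) instead of the
     double precision __builtin_vsx_xvcmpeqdp_p. */
  return vec_all_ne(a, b);
}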

Added: 
    

Modified: 
    clang/lib/Headers/altivec.h
    clang/test/CodeGen/builtins-ppc-p8vector.c

Removed: 
    


################################################################################
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 8352f8f740c2..77a0e494df36 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -14784,7 +14784,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
                                               vector float __b) {
 #ifdef __VSX__
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, (vector double)__a, (vector double)__b);
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
 #else
   return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
 #endif

diff --git a/clang/test/CodeGen/builtins-ppc-p8vector.c b/clang/test/CodeGen/builtins-ppc-p8vector.c
index a686b0a0796b..d494e463105e 100644
--- a/clang/test/CodeGen/builtins-ppc-p8vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p8vector.c
@@ -469,6 +469,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
 
+  res_i = vec_all_eq(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
   /* vec_all_ne */
   res_i = vec_all_ne(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -515,6 +519,13 @@ void test1() {
   dummy();
 // CHECK: @dummy
 
+  res_i = vec_all_ne(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
+  dummy();
+// CHECK: @dummy
+
   res_i = vec_all_nge(vda, vda);
 // CHECK: @llvm.ppc.vsx.xvcmpgedp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
@@ -563,6 +574,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
 
+  res_i = vec_any_eq(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
   /* vec_any_ne */
   res_i = vec_any_ne(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -603,6 +618,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
 
+  res_i = vec_any_ne(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
   /* vec_all_ge */
   res_i = vec_all_ge(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -643,6 +662,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgedp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
 
+  res_i = vec_all_ge(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
   /* vec_all_gt */
   res_i = vec_all_gt(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -683,6 +706,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
 
+  res_i = vec_all_gt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
   /* vec_all_le */
   res_i = vec_all_le(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -723,6 +750,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgedp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
 
+  res_i = vec_all_le(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
   /* vec_all_lt */
   res_i = vec_all_lt(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -763,10 +794,18 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
 
+  res_i = vec_all_lt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
   res_i = vec_all_nan(vda);
 // CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
 
+  res_i = vec_all_nan(vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
   /* vec_any_ge */
   res_i = vec_any_ge(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -807,6 +846,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgedp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
 
+  res_i = vec_any_ge(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
   /* vec_any_gt */
   res_i = vec_any_gt(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -887,6 +930,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgedp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
 
+  res_i = vec_any_le(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
   /* vec_any_lt */
   res_i = vec_any_lt(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -927,6 +974,10 @@ void test1() {
 // CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
 // CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
 
+  res_i = vec_any_lt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
   /* vec_max */
   res_vsll = vec_max(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vmaxsd
@@ -1309,6 +1360,12 @@ void test1() {
 // CHECK-LE: [[T1:%.+]] = and <2 x i64>
 // CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
 
+  res_vf = vec_nand(vfa, vfa);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
   /* vec_orc */
   res_vsc = vec_orc(vsc, vsc);
 // CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
