r241904 - Add missing builtins to altivec.h for ABI compliance (vol. 3)

Nemanja Ivanovic nemanja.i.ibm at gmail.com
Fri Jul 10 06:11:34 PDT 2015


Author: nemanjai
Date: Fri Jul 10 08:11:34 2015
New Revision: 241904

URL: http://llvm.org/viewvc/llvm-project?rev=241904&view=rev
Log:
Add missing builtins to altivec.h for ABI compliance (vol. 3)

This patch corresponds to review:
http://reviews.llvm.org/D10972

Fixes the handling of dependent features that are enabled by default
on some CPUs (such as -mvsx and -mpower8-vector); illustrated below.
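
For illustration, the intended driver behavior (invocations are
hypothetical; the diagnostic text is the new err_opt_not_valid_with_opt
added below):

  clang -c -mcpu=pwr8 -mno-vsx t.c
    OK: vsx is only a default here, so -mno-vsx also turns off
    power8-vector and direct-move rather than conflicting with them.
  clang -c -mcpu=pwr8 -mpower8-vector -mno-vsx t.c
    error: option '-mpower8-vector' cannot be specified with '-mno-vsx'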

Also adds a number of new interfaces to altivec.h and fixes the
signatures of existing ones.

Changed signatures to conform to ABI (see the note after this list):
vector short vec_perm(vector signed short, vector signed short, vector unsigned char)
vector int vec_perm(vector signed int, vector signed int, vector unsigned char)
vector long long vec_perm(vector signed long long, vector signed long long, vector unsigned char)
vector signed char vec_sld(vector signed char, vector signed char, const int)
vector unsigned char vec_sld(vector unsigned char, vector unsigned char, const int)
vector bool char vec_sld(vector bool char, vector bool char, const int)
vector unsigned short vec_sld(vector unsigned short, vector unsigned short, const int)
vector signed short vec_sld(vector signed short, vector signed short, const int)
vector signed int vec_sld(vector signed int, vector signed int, const int)
vector unsigned int vec_sld(vector unsigned int, vector unsigned int, const int)
vector float vec_sld(vector float, vector float, const int)
vector signed char vec_splat(vector signed char, const int)
vector unsigned char vec_splat(vector unsigned char, const int)
vector bool char vec_splat(vector bool char, const int)
vector signed short vec_splat(vector signed short, const int)
vector unsigned short vec_splat(vector unsigned short, const int)
vector bool short vec_splat(vector bool short, const int)
vector pixel vec_splat(vector pixel, const int)
vector signed int vec_splat(vector signed int, const int)
vector unsigned int vec_splat(vector unsigned int, const int)
vector bool int vec_splat(vector bool int, const int)
vector float vec_splat(vector float, const int)
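
Note on these signatures: the count operand is now a const int rather
than unsigned char and, as the altivec.h hunks below show, it is masked
to its valid range (& 0x0F for vec_sld; & 0x0F/0x07/0x03 for the
byte/halfword/word vec_splat forms). An illustrative consequence:

  vector signed int x = vec_splat(y, 5); /* same as vec_splat(y, 5 & 0x03),
                                            i.e. element 1 */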

Added a VSX path to:
vector float vec_round(vector float)

Added interfaces (a short usage sketch follows this list):
vector signed char vec_eqv(vector signed char, vector signed char)
vector signed char vec_eqv(vector bool char, vector signed char)
vector signed char vec_eqv(vector signed char, vector bool char)
vector unsigned char vec_eqv(vector unsigned char, vector unsigned char)
vector unsigned char vec_eqv(vector bool char, vector unsigned char)
vector unsigned char vec_eqv(vector unsigned char, vector bool char)
vector signed short vec_eqv(vector signed short, vector signed short)
vector signed short vec_eqv(vector bool short, vector signed short)
vector signed short vec_eqv(vector signed short, vector bool short)
vector unsigned short vec_eqv(vector unsigned short, vector unsigned short)
vector unsigned short vec_eqv(vector bool short, vector unsigned short)
vector unsigned short vec_eqv(vector unsigned short, vector bool short)
vector signed int vec_eqv(vector signed int, vector signed int)
vector signed int vec_eqv(vector bool int, vector signed int)
vector signed int vec_eqv(vector signed int, vector bool int)
vector unsigned int vec_eqv(vector unsigned int, vector unsigned int)
vector unsigned int vec_eqv(vector bool int, vector unsigned int)
vector unsigned int vec_eqv(vector unsigned int, vector bool int)
vector signed long long vec_eqv(vector signed long long, vector signed long long)
vector signed long long vec_eqv(vector bool long long, vector signed long long)
vector signed long long vec_eqv(vector signed long long, vector bool long long)
vector unsigned long long vec_eqv(vector unsigned long long, vector unsigned long long)
vector unsigned long long vec_eqv(vector bool long long, vector unsigned long long)
vector unsigned long long vec_eqv(vector unsigned long long, vector bool long long)
vector float vec_eqv(vector float, vector float)
vector float vec_eqv(vector bool int, vector float)
vector float vec_eqv(vector float, vector bool int)
vector double vec_eqv(vector double, vector double)
vector double vec_eqv(vector bool long long, vector double)
vector double vec_eqv(vector double, vector bool long long)
vector bool long long vec_perm(vector bool long long, vector bool long long, vector unsigned char)
vector double vec_round(vector double)
vector double vec_splat(vector double, const int)
vector bool long long vec_splat(vector bool long long, const int)
vector signed long long vec_splat(vector signed long long, const int)
vector unsigned long long vec_splat(vector unsigned long long, const int)
vector bool int vec_sld(vector bool int, vector bool int, const int)
vector bool short vec_sld(vector bool short, vector bool short, const int)
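
A minimal usage sketch of the new interfaces (names are illustrative;
vec_eqv is guarded by __POWER8_VECTOR__ and the double/long long
overloads by __VSX__, so compile with e.g. -mcpu=pwr8):

  #include <altivec.h>

  vector unsigned int eqv_demo(vector unsigned int a,
                               vector unsigned int b) {
    /* Bitwise equivalence, ~(a ^ b); expands to __builtin_vsx_xxleqv. */
    return vec_eqv(a, b);
  }

  vector double splat_demo(vector double a) {
    /* Replicate doubleword element 1 into both result elements. */
    return vec_splat(a, 1);
  }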

Added:
    cfe/trunk/test/Driver/ppc-dependent-options.cpp
Modified:
    cfe/trunk/include/clang/Basic/BuiltinsPPC.def
    cfe/trunk/include/clang/Basic/DiagnosticCommonKinds.td
    cfe/trunk/lib/Basic/Targets.cpp
    cfe/trunk/lib/Headers/altivec.h
    cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
    cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c
    cfe/trunk/test/CodeGen/builtins-ppc-vsx.c

Modified: cfe/trunk/include/clang/Basic/BuiltinsPPC.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsPPC.def?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsPPC.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsPPC.def Fri Jul 10 08:11:34 2015
@@ -312,6 +312,8 @@ BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4
 BUILTIN(__builtin_vsx_xvsqrtdp, "V2dV2d", "")
 BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "")
 
+BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "")
+
 // HTM builtins
 BUILTIN(__builtin_tbegin, "UiUIi", "")
 BUILTIN(__builtin_tend, "UiUIi", "")
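
For reference, the prototype string uses the usual Builtins.def type
encoding ("V4Ui" = vector of 4 unsigned ints), so the new builtin is
roughly:

  vector unsigned int __builtin_vsx_xxleqv(vector unsigned int,
                                           vector unsigned int);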

Modified: cfe/trunk/include/clang/Basic/DiagnosticCommonKinds.td
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/DiagnosticCommonKinds.td?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/DiagnosticCommonKinds.td (original)
+++ cfe/trunk/include/clang/Basic/DiagnosticCommonKinds.td Fri Jul 10 08:11:34 2015
@@ -179,6 +179,8 @@ def err_target_unsupported_fpmath : Erro
     "the '%0' unit is not supported with this instruction set">;
 def err_target_unsupported_unaligned : Error<
   "the %0 sub-architecture does not support unaligned accesses">;
+def err_opt_not_valid_with_opt : Error<
+  "option '%0' cannot be specified with '%1'">;
 
 // Source manager
 def err_cannot_open_file : Error<"cannot open file '%0': %1">, DefaultFatal;

Modified: cfe/trunk/lib/Basic/Targets.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Basic/Targets.cpp?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/lib/Basic/Targets.cpp (original)
+++ cfe/trunk/lib/Basic/Targets.cpp Fri Jul 10 08:11:34 2015
@@ -863,6 +863,8 @@ public:
   bool handleTargetFeatures(std::vector<std::string> &Features,
                             DiagnosticsEngine &Diags) override;
   bool hasFeature(StringRef Feature) const override;
+  void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+                         bool Enabled) const override;
 
   void getGCCRegNames(const char * const *&Names,
                       unsigned &NumNames) const override;
@@ -1036,7 +1038,6 @@ bool PPCTargetInfo::handleTargetFeatures
 
     if (Feature == "power8-vector") {
       HasP8Vector = true;
-      HasVSX = true;
       continue;
     }
 
@@ -1047,7 +1048,6 @@ bool PPCTargetInfo::handleTargetFeatures
 
     if (Feature == "direct-move") {
       HasDirectMove = true;
-      HasVSX = true;
       continue;
     }
 
@@ -1064,6 +1064,15 @@ bool PPCTargetInfo::handleTargetFeatures
     // TODO: Finish this list and add an assert that we've handled them
     // all.
   }
+  if (!HasVSX && (HasP8Vector || HasDirectMove)) {
+    if (HasP8Vector)
+      Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector" <<
+                                                        "-mno-vsx";
+    else if (HasDirectMove)
+      Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move" <<
+                                                        "-mno-vsx";
+    return false;
+  }
 
   return true;
 }
@@ -1285,6 +1294,11 @@ void PPCTargetInfo::getDefaultFeatures(l
     .Case("ppc64le", true)
     .Case("pwr8", true)
     .Default(false);
+  Features["vsx"] = llvm::StringSwitch<bool>(CPU)
+    .Case("ppc64le", true)
+    .Case("pwr8", true)
+    .Case("pwr7", true)
+    .Default(false);
 }
 
 bool PPCTargetInfo::hasFeature(StringRef Feature) const {
@@ -1301,6 +1315,39 @@ bool PPCTargetInfo::hasFeature(StringRef
     .Default(false);
 }
 
+/*  There is no clear way for the target to know which of the features in the
+    final feature vector came from defaults and which are actually specified by
+    the user. To that end, we use the fact that this function is not called on
+    default features - only user specified ones. By the first time this
+    function is called, the default features are populated.
+    We then keep track of the features that the user specified so that we
+    can ensure we do not override a user's request (only defaults).
+    For example:
+    -mcpu=pwr8 -mno-vsx (should disable vsx and everything that depends on it)
+    -mcpu=pwr8 -mdirect-move -mno-vsx (should actually be diagnosed)
+
+NOTE: Do not call this from PPCTargetInfo::getDefaultFeatures
+*/
+void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+                                      StringRef Name, bool Enabled) const {
+  static llvm::StringMap<bool> ExplicitFeatures;
+  ExplicitFeatures[Name] = Enabled;
+
+  // At this point, -mno-vsx turns off the dependent features but we respect
+  // the user's requests.
+  if (!Enabled && Name == "vsx") {
+    Features["direct-move"] = ExplicitFeatures["direct-move"];
+    Features["power8-vector"] = ExplicitFeatures["power8-vector"];
+  }
+  if ((Enabled && Name == "power8-vector") ||
+      (Enabled && Name == "direct-move")) {
+    if (ExplicitFeatures.find("vsx") == ExplicitFeatures.end()) {
+      Features["vsx"] = true;
+    }
+  }
+  Features[Name] = Enabled;
+}
+
 const char * const PPCTargetInfo::GCCRegNames[] = {
   "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
   "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",

Modified: cfe/trunk/lib/Headers/altivec.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/altivec.h?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/altivec.h (original)
+++ cfe/trunk/lib/Headers/altivec.h Fri Jul 10 08:11:34 2015
@@ -48,7 +48,8 @@ static vector bool char __ATTRS_o_ai vec
                                               vector bool char __b,
                                               vector unsigned char __c);
 
-static vector short __ATTRS_o_ai vec_perm(vector short __a, vector short __b,
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+                                          vector signed short __b,
                                           vector unsigned char __c);
 
 static vector unsigned short __ATTRS_o_ai vec_perm(vector unsigned short __a,
@@ -62,7 +63,8 @@ static vector bool short __ATTRS_o_ai ve
 static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
                                           vector unsigned char __c);
 
-static vector int __ATTRS_o_ai vec_perm(vector int __a, vector int __b,
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+                                        vector signed int __b,
                                         vector unsigned char __c);
 
 static vector unsigned int __ATTRS_o_ai vec_perm(vector unsigned int __a,
@@ -77,14 +79,18 @@ static vector float __ATTRS_o_ai vec_per
                                           vector unsigned char __c);
 
 #ifdef __VSX__
-static vector long long __ATTRS_o_ai vec_perm(vector long long __a,
-                                              vector long long __b,
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+                                              vector signed long long __b,
                                               vector unsigned char __c);
 
 static vector unsigned long long __ATTRS_o_ai
 vec_perm(vector unsigned long long __a, vector unsigned long long __b,
          vector unsigned char __c);
 
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+         vector unsigned char __c);
+
 static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
                                            vector unsigned char __c);
 #endif
@@ -1841,6 +1847,189 @@ vec_dstt(const void *__a, int __b, int _
   __builtin_altivec_dstt(__a, __b, __c);
 }
 
+/* vec_eqv */
+
+#ifdef __POWER8_VECTOR__
+static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
+                                               vector signed char __b) {
+  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                  (vector unsigned int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_eqv(vector bool char __a,
+                                               vector signed char __b) {
+  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                  (vector unsigned int)__b);
+}
+
+static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
+                                               vector bool char __b) {
+  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                  (vector unsigned int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
+                                                 vector unsigned char __b) {
+  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                    (vector unsigned int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_eqv(vector bool char __a,
+                                                 vector unsigned char __b) {
+  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                    (vector unsigned int)__b);
+}
+
+static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
+                                                 vector bool char __b) {
+  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                    (vector unsigned int)__b);
+}
+
+static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
+                                                vector signed short __b) {
+  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                   (vector unsigned int)__b);
+}
+
+static vector signed short __ATTRS_o_ai vec_eqv(vector bool short __a,
+                                                vector signed short __b) {
+  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                   (vector unsigned int)__b);
+}
+
+static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
+                                                vector bool short __b) {
+  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                   (vector unsigned int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
+                                                  vector unsigned short __b) {
+  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                     (vector unsigned int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_eqv(vector bool short __a,
+                                                  vector unsigned short __b) {
+  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                     (vector unsigned int)__b);
+}
+
+static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
+                                                  vector bool short __b) {
+  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                     (vector unsigned int)__b);
+}
+
+static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
+                                              vector signed int __b) {
+  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                 (vector unsigned int)__b);
+}
+
+static vector signed int __ATTRS_o_ai vec_eqv(vector bool int __a,
+                                              vector signed int __b) {
+  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                 (vector unsigned int)__b);
+}
+
+static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
+                                              vector bool int __b) {
+  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                                 (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
+                                                vector unsigned int __b) {
+  return __builtin_vsx_xxleqv((vector unsigned int)__a,
+                              (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_eqv(vector bool int __a,
+                                                vector unsigned int __b) {
+  return __builtin_vsx_xxleqv((vector unsigned int)__a,
+                              (vector unsigned int)__b);
+}
+
+static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
+                                                vector bool int __b) {
+  return __builtin_vsx_xxleqv((vector unsigned int)__a,
+                              (vector unsigned int)__b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_eqv(vector signed long long __a, vector signed long long __b) {
+  return (vector signed long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector signed long long __b) {
+  return (vector signed long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector signed long long __ATTRS_o_ai
+vec_eqv(vector signed long long __a, vector bool long long __b) {
+  return (vector signed long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+  return (vector unsigned long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector unsigned long long __b) {
+  return (vector unsigned long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_eqv(vector unsigned long long __a, vector bool long long __b) {
+  return (vector unsigned long long)
+    __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
+  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                            (vector unsigned int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_eqv(vector bool int __a,
+                                         vector float __b) {
+  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                            (vector unsigned int)__b);
+}
+
+static vector float __ATTRS_o_ai vec_eqv(vector float __a,
+                                         vector bool int __b) {
+  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                            (vector unsigned int)__b);
+}
+
+static vector double __ATTRS_o_ai vec_eqv(vector double __a,
+                                          vector double __b) {
+  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                             (vector unsigned int)__b);
+}
+
+static vector double __ATTRS_o_ai vec_eqv(vector bool long long __a,
+                                          vector double __b) {
+  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                             (vector unsigned int)__b);
+}
+
+static vector double __ATTRS_o_ai vec_eqv(vector double __a,
+                                          vector bool long long __b) {
+  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
+                                             (vector unsigned int)__b);
+}
+#endif
+
 /* vec_expte */
 
 static vector float __attribute__((__always_inline__))
@@ -4982,7 +5171,8 @@ static vector bool char __ATTRS_o_ai vec
 #endif
 }
 
-static vector short __ATTRS_o_ai vec_perm(vector short __a, vector short __b,
+static vector short __ATTRS_o_ai vec_perm(vector signed short __a,
+                                          vector signed short __b,
                                           vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
@@ -5040,7 +5230,8 @@ static vector pixel __ATTRS_o_ai vec_per
 #endif
 }
 
-static vector int __ATTRS_o_ai vec_perm(vector int __a, vector int __b,
+static vector int __ATTRS_o_ai vec_perm(vector signed int __a,
+                                        vector signed int __b,
                                         vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
@@ -5097,8 +5288,8 @@ static vector float __ATTRS_o_ai vec_per
 }
 
 #ifdef __VSX__
-static vector long long __ATTRS_o_ai vec_perm(vector long long __a,
-                                              vector long long __b,
+static vector long long __ATTRS_o_ai vec_perm(vector signed long long __a,
+                                              vector signed long long __b,
                                               vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
   vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
@@ -5125,6 +5316,21 @@ vec_perm(vector unsigned long long __a,
 #endif
 }
 
+static vector bool long long __ATTRS_o_ai
+vec_perm(vector bool long long __a, vector bool long long __b,
+         vector unsigned char __c) {
+#ifdef __LITTLE_ENDIAN__
+  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
+                              255, 255, 255, 255, 255, 255, 255, 255};
+  __d = vec_xor(__c, __d);
+  return (vector bool long long)__builtin_altivec_vperm_4si(
+      (vector int)__b, (vector int)__a, __d);
+#else
+  return (vector bool long long)__builtin_altivec_vperm_4si(
+      (vector int)__a, (vector int)__b, __c);
+#endif
+}
+
 static vector double __ATTRS_o_ai vec_perm(vector double __a, vector double __b,
                                            vector unsigned char __c) {
 #ifdef __LITTLE_ENDIAN__
@@ -5319,12 +5525,19 @@ static vector unsigned int __ATTRS_o_ai
 
 /* vec_round */
 
-static vector float __attribute__((__always_inline__))
-vec_round(vector float __a) {
+static vector float __ATTRS_o_ai vec_round(vector float __a) {
+#ifdef __VSX__
+  return __builtin_vsx_xvrspi(__a);
+#else
   return __builtin_altivec_vrfin(__a);
+#endif
 }
 
 #ifdef __VSX__
+static vector double __ATTRS_o_ai vec_round(vector double __a) {
+  return __builtin_vsx_xvrdpi(__a);
+}
+
 /* vec_rint */
 
 static vector float __ATTRS_o_ai
@@ -5742,78 +5955,121 @@ static vector unsigned int __ATTRS_o_ai
 
 static vector signed char __ATTRS_o_ai vec_sld(vector signed char __a,
                                                vector signed char __b,
-                                               unsigned char __c) {
+                                               unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 static vector unsigned char __ATTRS_o_ai vec_sld(vector unsigned char __a,
                                                  vector unsigned char __b,
-                                                 unsigned char __c) {
+                                                 unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
-static vector short __ATTRS_o_ai vec_sld(vector short __a, vector short __b,
-                                         unsigned char __c) {
+static vector bool char __ATTRS_o_ai vec_sld(vector bool char __a,
+                                             vector bool char __b,
+                                             unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+}
+
+static vector signed short __ATTRS_o_ai vec_sld(vector signed short __a,
+                                                vector signed short __b,
+                                                unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
+  return vec_perm(
+      __a, __b,
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 static vector unsigned short __ATTRS_o_ai vec_sld(vector unsigned short __a,
                                                   vector unsigned short __b,
-                                                  unsigned char __c) {
+                                                  unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+}
+
+static vector bool short __ATTRS_o_ai vec_sld(vector bool short __a,
+                                              vector bool short __b,
+                                              unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
+  return vec_perm(
+      __a, __b,
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 static vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, vector pixel __b,
-                                         unsigned char __c) {
+                                         unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
-static vector int __ATTRS_o_ai vec_sld(vector int __a, vector int __b,
-                                       unsigned char __c) {
+static vector signed int __ATTRS_o_ai vec_sld(vector signed int __a,
+                                              vector signed int __b,
+                                              unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 static vector unsigned int __ATTRS_o_ai vec_sld(vector unsigned int __a,
                                                 vector unsigned int __b,
-                                                unsigned char __c) {
+                                                unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+}
+
+static vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
+                                            vector bool int __b,
+                                            unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
+  return vec_perm(
+      __a, __b,
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 static vector float __ATTRS_o_ai vec_sld(vector float __a, vector float __b,
-                                         unsigned char __c) {
+                                         unsigned const int __c) {
+  unsigned char __d = __c & 0x0F;
   return vec_perm(
       __a, __b,
-      (vector unsigned char)(__c, __c + 1, __c + 2, __c + 3, __c + 4, __c + 5,
-                             __c + 6, __c + 7, __c + 8, __c + 9, __c + 10,
-                             __c + 11, __c + 12, __c + 13, __c + 14, __c + 15));
+      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
 }
 
 /* vec_vsldoi */
@@ -6422,91 +6678,131 @@ static vector float __ATTRS_o_ai vec_vsl
 /* vec_splat */
 
 static vector signed char __ATTRS_o_ai vec_splat(vector signed char __a,
-                                                 unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
+                                                 unsigned const int __b) {
+  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
 static vector unsigned char __ATTRS_o_ai vec_splat(vector unsigned char __a,
-                                                   unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
+                                                   unsigned const int __b) {
+  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
 static vector bool char __ATTRS_o_ai vec_splat(vector bool char __a,
-                                               unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
+                                               unsigned const int __b) {
+  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
 }
 
-static vector short __ATTRS_o_ai vec_splat(vector short __a,
-                                           unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
+static vector signed short __ATTRS_o_ai vec_splat(vector signed short __a,
+                                                  unsigned const int __b) {
+  unsigned char b0 = (__b & 0x07) * 2;
+  unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1, b0, b1));
 }
 
 static vector unsigned short __ATTRS_o_ai vec_splat(vector unsigned short __a,
-                                                    unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
+                                                    unsigned const int __b) {
+  unsigned char b0 = (__b & 0x07) * 2;
+  unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1, b0, b1));
 }
 
 static vector bool short __ATTRS_o_ai vec_splat(vector bool short __a,
-                                                unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
+                                                unsigned const int __b) {
+  unsigned char b0 = (__b & 0x07) * 2;
+  unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1, b0, b1));
 }
 
 static vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                           unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
+                                           unsigned const int __b) {
+  unsigned char b0 = (__b & 0x07) * 2;
+  unsigned char b1 = b0 + 1;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
+                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1,
+                                         b0, b1, b0, b1, b0, b1, b0, b1));
 }
 
-static vector int __ATTRS_o_ai vec_splat(vector int __a, unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+static vector signed int __ATTRS_o_ai vec_splat(vector signed int __a,
+                                                unsigned const int __b) {
+  unsigned char b0 = (__b & 0x03) * 4;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+                                         b1, b2, b3, b0, b1, b2, b3));
 }
 
 static vector unsigned int __ATTRS_o_ai vec_splat(vector unsigned int __a,
-                                                  unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+                                                  unsigned const int __b) {
+  unsigned char b0 = (__b & 0x03) * 4;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+                                         b1, b2, b3, b0, b1, b2, b3));
 }
 
 static vector bool int __ATTRS_o_ai vec_splat(vector bool int __a,
-                                              unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+                                              unsigned const int __b) {
+  unsigned char b0 = (__b & 0x03) * 4;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+                                         b1, b2, b3, b0, b1, b2, b3));
 }
 
 static vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                           unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
+                                           unsigned const int __b) {
+  unsigned char b0 = (__b & 0x03) * 4;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
   return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
+                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0,
+                                         b1, b2, b3, b0, b1, b2, b3));
 }
 
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_splat(vector double __a,
+                                            unsigned const int __b) {
+  unsigned char b0 = (__b & 0x01) * 8;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  return vec_perm(__a, __a,
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+                                         b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector bool long long __ATTRS_o_ai vec_splat(vector bool long long __a,
+                                                    unsigned const int __b) {
+  unsigned char b0 = (__b & 0x01) * 8;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  return vec_perm(__a, __a,
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+                                         b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector signed long long __ATTRS_o_ai
+vec_splat(vector signed long long __a, unsigned const int __b) {
+  unsigned char b0 = (__b & 0x01) * 8;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  return vec_perm(__a, __a,
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+                                         b0, b1, b2, b3, b4, b5, b6, b7));
+}
+static vector unsigned long long __ATTRS_o_ai
+vec_splat(vector unsigned long long __a, unsigned const int __b) {
+  unsigned char b0 = (__b & 0x01) * 8;
+  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4,
+                b5 = b0 + 5, b6 = b0 + 6, b7 = b0 + 7;
+  return vec_perm(__a, __a,
+                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7,
+                                         b0, b1, b2, b3, b4, b5, b6, b7));
+}
+#endif
+
 /* vec_vspltb */
 
 #define __builtin_altivec_vspltb vec_vspltb
@@ -6678,7 +6974,8 @@ static vector unsigned int __ATTRS_o_ai
 
 static vector signed char __ATTRS_o_ai vec_sr(vector signed char __a,
                                               vector unsigned char __b) {
-  return __a >> (vector signed char)__b;
+  vector unsigned char __res = (vector unsigned char)__a >> __b;
+  return (vector signed char)__res;
 }
 
 static vector unsigned char __ATTRS_o_ai vec_sr(vector unsigned char __a,
@@ -6686,9 +6983,10 @@ static vector unsigned char __ATTRS_o_ai
   return __a >> __b;
 }
 
-static vector short __ATTRS_o_ai vec_sr(vector short __a,
+static vector signed short __ATTRS_o_ai vec_sr(vector signed short __a,
                                         vector unsigned short __b) {
-  return __a >> (vector short)__b;
+  vector unsigned short __res = (vector unsigned short)__a >> __b;
+  return (vector signed short)__res;
 }
 
 static vector unsigned short __ATTRS_o_ai vec_sr(vector unsigned short __a,
@@ -6696,8 +6994,10 @@ static vector unsigned short __ATTRS_o_a
   return __a >> __b;
 }
 
-static vector int __ATTRS_o_ai vec_sr(vector int __a, vector unsigned int __b) {
-  return __a >> (vector int)__b;
+static vector signed int __ATTRS_o_ai vec_sr(vector signed int __a,
+                                             vector unsigned int __b) {
+  vector unsigned int __res = (vector unsigned int)__a >> __b;
+  return (vector signed int)__res;
 }
 
 static vector unsigned int __ATTRS_o_ai vec_sr(vector unsigned int __a,
@@ -6708,7 +7008,8 @@ static vector unsigned int __ATTRS_o_ai
 #ifdef __POWER8_VECTOR__
 static vector signed long long __ATTRS_o_ai
 vec_sr(vector signed long long __a, vector unsigned long long __b) {
-  return __a >> (vector long long)__b;
+  vector unsigned long long __res = (vector unsigned long long)__a >> __b;
+  return (vector signed long long)__res;
 }
 
 static vector unsigned long long __ATTRS_o_ai
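
One behavioral note on the vec_sr hunks above: vec_sr is defined as a
logical shift even for signed element types, hence the detour through
the unsigned type (the test updates below accordingly expect lshr
rather than shr). A small sketch of the difference, with illustrative
values:

  vector signed int v = { -4, -4, -4, -4 };
  vector unsigned int s = { 1, 1, 1, 1 };
  vector signed int r = vec_sr(v, s);
  /* An arithmetic shift (the old behavior) would yield r[i] == -2;
     the logical shift now performed yields r[i] == 0x7FFFFFFE. */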

Modified: cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-altivec.c?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-altivec.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-altivec.c Fri Jul 10 08:11:34 2015
@@ -3272,6 +3272,15 @@ void test6() {
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
 
+  res_vbs = vec_sld(vbs, vbs, 0);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
   res_vp  = vec_sld(vp, vp, 0);
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
@@ -3284,6 +3293,11 @@ void test6() {
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
 
+  res_vbi = vec_sld(vbi, vbi, 0);
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{.+}}, <4 x i32> {{.+}}, <16 x i8>
+
   res_vf  = vec_sld(vf, vf, 0);
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
@@ -3802,28 +3816,28 @@ void test6() {
 
   /* vec_sr */
   res_vsc = vec_sr(vsc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: lshr <16 x i8>
+// CHECK-LE: lshr <16 x i8>
 
   res_vuc = vec_sr(vuc, vuc);
-// CHECK: shr <16 x i8>
-// CHECK-LE: shr <16 x i8>
+// CHECK: lshr <16 x i8>
+// CHECK-LE: lshr <16 x i8>
 
   res_vs  = vec_sr(vs, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: lshr <8 x i16>
+// CHECK-LE: lshr <8 x i16>
 
   res_vus = vec_sr(vus, vus);
-// CHECK: shr <8 x i16>
-// CHECK-LE: shr <8 x i16>
+// CHECK: lshr <8 x i16>
+// CHECK-LE: lshr <8 x i16>
 
   res_vi  = vec_sr(vi, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: lshr <4 x i32>
+// CHECK-LE: lshr <4 x i32>
 
   res_vui = vec_sr(vui, vui);
-// CHECK: shr <4 x i32>
-// CHECK-LE: shr <4 x i32>
+// CHECK: lshr <4 x i32>
+// CHECK-LE: lshr <4 x i32>
 
   res_vsc = vec_vsrb(vsc, vuc);
 // CHECK: shr <16 x i8>

Modified: cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-p8vector.c Fri Jul 10 08:11:34 2015
@@ -9,23 +9,41 @@
 
 vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
 vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
-vector int vi = { -1, 2, -3, 4 };
+vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 };
+
+vector signed short vss = { 0, 1, 2, 3, 4, 5, 6, 7 };
+vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
+vector bool short vbs = { 1, 1, 0, 0, 0, 0, 1, 1 };
+
+vector signed int vsi = { -1, 2, -3, 4 };
 vector unsigned int vui = { 1, 2, 3, 4 };
 vector bool int vbi = {0, -1, -1, 0};
-vector bool long long vbll = { 1, 0 };
+
 vector signed long long vsll = { 1, 2 };
 vector unsigned long long vull = { 1, 2 };
+vector bool long long vbll = { 1, 0 };
+
+vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f };
 vector double vda = { 1.e-11, -132.23e10 };
 
 int res_i;
 vector signed char res_vsc;
 vector unsigned char res_vuc;
-vector int res_vi;
+vector bool char res_vbc;
+
+vector signed short res_vss;
+vector unsigned short res_vus;
+vector bool short res_vbs;
+
+vector int res_vsi;
 vector unsigned int res_vui;
 vector bool int res_vbi;
-vector bool long long res_vbll;
+
 vector signed long long res_vsll;
 vector unsigned long long res_vull;
+vector bool long long res_vbll;
+
+vector double res_vf;
 vector double res_vd;
 
 // CHECK-LABEL: define void @test1
@@ -60,7 +78,7 @@ void test1() {
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
   
-  res_vi = vec_mergee(vi, vi);
+  res_vsi = vec_mergee(vsi, vsi);
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
 
@@ -74,7 +92,7 @@ void test1() {
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
 
-  res_vi = vec_mergeo(vi, vi);
+  res_vsi = vec_mergeo(vsi, vsi);
 // CHECK: @llvm.ppc.altivec.vperm
 // CHECK-LE: @llvm.ppc.altivec.vperm
 
@@ -138,6 +156,292 @@ void test1() {
 // CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
 // CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
 
+  /* vec_eqv */
+  res_vsc =  vec_eqv(vsc, vsc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vsc =  vec_eqv(vbc, vsc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vsc =  vec_eqv(vsc, vbc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vuc =  vec_eqv(vuc, vuc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vuc =  vec_eqv(vbc, vuc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vuc =  vec_eqv(vuc, vbc);
+// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
+// CHECK-PPC: error: assigning to
+
+  res_vss =  vec_eqv(vss, vss);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vss =  vec_eqv(vbs, vss);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vss =  vec_eqv(vss, vbs);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vus =  vec_eqv(vus, vus);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vus =  vec_eqv(vbs, vus);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vus =  vec_eqv(vus, vbs);
+// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
+// CHECK-PPC: error: assigning to
+
+  res_vsi =  vec_eqv(vsi, vsi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vsi =  vec_eqv(vbi, vsi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vsi =  vec_eqv(vsi, vbi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vui =  vec_eqv(vui, vui);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vui =  vec_eqv(vbi, vui);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vui =  vec_eqv(vui, vbi);
+// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
+// CHECK-PPC: error: assigning to
+
+  res_vsll =  vec_eqv(vsll, vsll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vsll =  vec_eqv(vbll, vsll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vsll =  vec_eqv(vsll, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vull =  vec_eqv(vull, vull);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vull =  vec_eqv(vbll, vull);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vull =  vec_eqv(vull, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
+// CHECK-PPC: error: assigning to
+
+  res_vf = vec_eqv(vfa, vfa);
+// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-PPC: error: assigning to
+
+  res_vf = vec_eqv(vbi, vfa);
+// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-PPC: error: assigning to
+
+  res_vf = vec_eqv(vfa, vbi);
+// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
+// CHECK-PPC: error: assigning to
+
+  res_vd = vec_eqv(vda, vda);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
+  res_vd = vec_eqv(vbll, vda);
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
+  res_vd = vec_eqv(vda, vbll);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
+// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
+// CHECK-PPC: error: assigning to
+
   /* ----------------------- predicates --------------------------- */
   /* vec_all_eq */
   res_i = vec_all_eq(vsll, vsll);
@@ -634,7 +938,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_min' is ambiguous
 
   /* vec_mule */
-  res_vsll = vec_mule(vi, vi);
+  res_vsll = vec_mule(vsi, vsi);
 // CHECK: @llvm.ppc.altivec.vmulesw
 // CHECK-LE: @llvm.ppc.altivec.vmulosw
 // CHECK-PPC: error: call to 'vec_mule' is ambiguous
@@ -645,7 +949,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_mule' is ambiguous
 
   /* vec_mulo */
-  res_vsll = vec_mulo(vi, vi);
+  res_vsll = vec_mulo(vsi, vsi);
 // CHECK: @llvm.ppc.altivec.vmulosw
 // CHECK-LE: @llvm.ppc.altivec.vmulesw
 // CHECK-PPC: error: call to 'vec_mulo' is ambiguous
@@ -656,7 +960,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_mulo' is ambiguous
 
   /* vec_packs */
-  res_vi = vec_packs(vsll, vsll);
+  res_vsi = vec_packs(vsll, vsll);
 // CHECK: @llvm.ppc.altivec.vpksdss
 // CHECK-LE: @llvm.ppc.altivec.vpksdss
 // CHECK-PPC: error: call to 'vec_packs' is ambiguous
@@ -701,8 +1005,8 @@ void test1() {
 
   /* vec_sr */
   res_vsll = vec_sr(vsll, vull);
-// CHECK: ashr <2 x i64>
-// CHECK-LE: ashr <2 x i64>
+// CHECK: lshr <2 x i64>
+// CHECK-LE: lshr <2 x i64>
 // CHECK-PPC: error: call to 'vec_sr' is ambiguous
 
   res_vull = vec_sr(vull, vull);
@@ -722,7 +1026,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_sra' is ambiguous
 
   /* vec_unpackh */
-  res_vsll = vec_unpackh(vi);
+  res_vsll = vec_unpackh(vsi);
 // CHECK: llvm.ppc.altivec.vupkhsw
 // CHECK-LE: llvm.ppc.altivec.vupklsw
 // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous
@@ -733,7 +1037,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_unpackh' is ambiguous
 
   /* vec_unpackl */
-  res_vsll = vec_unpackl(vi);
+  res_vsll = vec_unpackl(vsi);
 // CHECK: llvm.ppc.altivec.vupklsw
 // CHECK-LE: llvm.ppc.altivec.vupkhsw
 // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous
@@ -744,7 +1048,7 @@ void test1() {
 // CHECK-PPC: error: call to 'vec_unpackl' is ambiguous
 
   /* vec_vpksdss */
-  res_vi = vec_vpksdss(vsll, vsll);
+  res_vsi = vec_vpksdss(vsll, vsll);
 // CHECK: llvm.ppc.altivec.vpksdss
 // CHECK-LE: llvm.ppc.altivec.vpksdss
 // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdss'
@@ -756,7 +1060,7 @@ void test1() {
 // CHECK-PPC: warning: implicit declaration of function 'vec_vpksdus'
 
   /* vec_vpkudum */
-  res_vi = vec_vpkudum(vsll, vsll);
+  res_vsi = vec_vpkudum(vsll, vsll);
 // CHECK: vperm
 // CHECK-LE: vperm
 // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudum'
@@ -771,7 +1075,7 @@ void test1() {
 // CHECK-PPC: warning: implicit declaration of function 'vec_vpkudus'
 
   /* vec_vupkhsw */
-  res_vsll = vec_vupkhsw(vi);
+  res_vsll = vec_vupkhsw(vsi);
 // CHECK: llvm.ppc.altivec.vupkhsw
 // CHECK-LE: llvm.ppc.altivec.vupklsw
 // CHECK-PPC: warning: implicit declaration of function 'vec_vupkhsw'
@@ -781,7 +1085,7 @@ void test1() {
 // CHECK-LE: llvm.ppc.altivec.vupklsw
 
   /* vec_vupklsw */
-  res_vsll = vec_vupklsw(vi);
+  res_vsll = vec_vupklsw(vsi);
 // CHECK: llvm.ppc.altivec.vupklsw
 // CHECK-LE: llvm.ppc.altivec.vupkhsw
 // CHECK-PPC: warning: implicit declaration of function 'vec_vupklsw'

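A couple of notes on the hunks above. vec_eqv is bitwise equivalence
(the complement of XOR), and every overload funnels through the VSX
xxleqv instruction by bitcasting the operands to <4 x i32>. The vec_sr
checks now expect lshr rather than ashr because the ABI defines vec_sr
as a logical (zero-filling) shift for all element types; the arithmetic
(sign-extending) shift remains vec_sra. A minimal sketch of the vec_eqv
semantics, assuming a POWER8/VSX-enabled compile (hypothetical helper,
not part of the patch):

  #include <altivec.h>

  // Hypothetical reference: vec_eqv(a, b) == ~(a ^ b), element-wise.
  // The builtin itself lowers to the VSX xxleqv instruction.
  vector unsigned int eqv_reference(vector unsigned int a,
                                    vector unsigned int b) {
    return ~(a ^ b);
  }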
Modified: cfe/trunk/test/CodeGen/builtins-ppc-vsx.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-vsx.c?rev=241904&r1=241903&r2=241904&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/builtins-ppc-vsx.c (original)
+++ cfe/trunk/test/CodeGen/builtins-ppc-vsx.c Fri Jul 10 08:11:34 2015
@@ -1,6 +1,6 @@
 // REQUIRES: powerpc-registered-target
 // RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
-// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
 
 vector unsigned char vuc = { 8,  9, 10, 11, 12, 13, 14, 15,
                              0,  1,  2,  3,  4,  5,  6,  7};
@@ -27,39 +27,57 @@ void dummy() { }
 
 void test1() {
 // CHECK-LABEL: define void @test1
+// CHECK-LE-LABEL: define void @test1
 
   res_vd = vec_add(vd, vd);
 // CHECK: fadd <2 x double>
+// CHECK-LE: fadd <2 x double>
 
   res_vd = vec_and(vbll, vd);
 // CHECK: and <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
 
   res_vd = vec_and(vd, vbll);
 // CHECK: and <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
 
   res_vd = vec_and(vd, vd);
 // CHECK: and <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_andc(vbll, vd);
 // CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
 // CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
 // CHECK: and <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
+// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_andc(vd, vbll);
 // CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
 // CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
 // CHECK: and <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
+// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
@@ -72,316 +90,483 @@ void test1() {
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_ceil(vd);
 // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
 
   res_vf = vec_ceil(vf);
 // CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
 
   res_vbll = vec_cmpeq(vd, vd);
 // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
 
   res_vbi = vec_cmpeq(vf, vf);
 // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
 
   res_vbll = vec_cmpge(vd, vd);
 // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
 
   res_vbi = vec_cmpge(vf, vf);
 // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
 
   res_vbll = vec_cmpgt(vd, vd);
 // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
 
   res_vbi = vec_cmpgt(vf, vf);
 // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
 
   res_vbll = vec_cmple(vd, vd);
 // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
 
   res_vbi = vec_cmple(vf, vf);
 // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
 
   res_vbll = vec_cmplt(vd, vd);
 // CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
+// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
 
   res_vbi = vec_cmplt(vf, vf);
 // CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
+// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
 
   /* vec_div */
   res_vf = vec_div(vf, vf);
 // CHECK: @llvm.ppc.vsx.xvdivsp
+// CHECK-LE: @llvm.ppc.vsx.xvdivsp
 
   res_vd = vec_div(vd, vd);
 // CHECK: @llvm.ppc.vsx.xvdivdp
+// CHECK-LE: @llvm.ppc.vsx.xvdivdp
 
   /* vec_max */
   res_vf = vec_max(vf, vf);
 // CHECK: @llvm.ppc.vsx.xvmaxsp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxsp
 
   res_vd = vec_max(vd, vd);
 // CHECK: @llvm.ppc.vsx.xvmaxdp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxdp
 
   res_vf = vec_vmaxfp(vf, vf);
 // CHECK: @llvm.ppc.vsx.xvmaxsp
+// CHECK-LE: @llvm.ppc.vsx.xvmaxsp
 
   /* vec_min */
   res_vf = vec_min(vf, vf);
 // CHECK: @llvm.ppc.vsx.xvminsp
+// CHECK-LE: @llvm.ppc.vsx.xvminsp
 
   res_vd = vec_min(vd, vd);
 // CHECK: @llvm.ppc.vsx.xvmindp
+// CHECK-LE: @llvm.ppc.vsx.xvmindp
 
   res_vf = vec_vminfp(vf, vf);
 // CHECK: @llvm.ppc.vsx.xvminsp
+// CHECK-LE: @llvm.ppc.vsx.xvminsp
 
   res_d = __builtin_vsx_xsmaxdp(d, d);
 // CHECK: @llvm.ppc.vsx.xsmaxdp
+// CHECK-LE: @llvm.ppc.vsx.xsmaxdp
 
   res_d = __builtin_vsx_xsmindp(d, d);
 // CHECK: @llvm.ppc.vsx.xsmindp
+// CHECK-LE: @llvm.ppc.vsx.xsmindp
 
   /* vec_perm */
   res_vsll = vec_perm(vsll, vsll, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
 
   res_vull = vec_perm(vull, vull, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+  res_vbll = vec_perm(vbll, vbll, vuc);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+  res_vf = vec_round(vf);
+// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
+// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>
+
+  res_vd = vec_round(vd);
+// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
+// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>
 
   res_vd = vec_perm(vd, vd, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+  res_vd = vec_splat(vd, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+  res_vbll = vec_splat(vbll, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+  res_vsll =  vec_splat(vsll, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+
+  res_vull =  vec_splat(vull, 1);
+// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
+// CHECK-LE: xor <16 x i8>
+// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
+// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
 
   res_vsll = vec_vperm(vsll, vsll, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
 
   res_vull = vec_vperm(vull, vull, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
 
   res_vd = vec_vperm(vd, vd, vuc);
 // CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
 
   /* vec_vsx_ld */
 
   res_vsi = vec_vsx_ld(0, &vsi);
 // CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
 
   res_vui = vec_vsx_ld(0, &vui);
 // CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
 
   res_vf = vec_vsx_ld (0, &vf);
 // CHECK: @llvm.ppc.vsx.lxvw4x
+// CHECK-LE: @llvm.ppc.vsx.lxvw4x
 
   res_vsll = vec_vsx_ld(0, &vsll);
 // CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
 
   res_vull = vec_vsx_ld(0, &vull);
 // CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
 
   res_vd = vec_vsx_ld(0, &vd);
 // CHECK: @llvm.ppc.vsx.lxvd2x
+// CHECK-LE: @llvm.ppc.vsx.lxvd2x
 
   /* vec_vsx_st */
 
   vec_vsx_st(vsi, 0, &res_vsi);
 // CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
 
   vec_vsx_st(vui, 0, &res_vui);
 // CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
 
   vec_vsx_st(vf, 0, &res_vf);
 // CHECK: @llvm.ppc.vsx.stxvw4x
+// CHECK-LE: @llvm.ppc.vsx.stxvw4x
 
   vec_vsx_st(vsll, 0, &res_vsll);
 // CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
 
   vec_vsx_st(vull, 0, &res_vull);
 // CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
 
   vec_vsx_st(vd, 0, &res_vd);
 // CHECK: @llvm.ppc.vsx.stxvd2x
+// CHECK-LE: @llvm.ppc.vsx.stxvd2x
 
   /* vec_and */
   res_vsll = vec_and(vsll, vsll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_and(vbll, vsll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_and(vsll, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_and(vull, vull);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_and(vbll, vull);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_and(vull, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vbll = vec_and(vbll, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   /* vec_vand */
   res_vsll = vec_vand(vsll, vsll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_vand(vbll, vsll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_vand(vsll, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_vand(vull, vull);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_vand(vbll, vull);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_vand(vull, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vbll = vec_vand(vbll, vbll);
 // CHECK: and <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   /* vec_andc */
   res_vsll = vec_andc(vsll, vsll);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_andc(vbll, vsll);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vsll = vec_andc(vsll, vbll);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_andc(vull, vull);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_andc(vbll, vull);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vull = vec_andc(vull, vbll);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vbll = vec_andc(vbll, vbll);
 // CHECK: xor <2 x i64>
 // CHECK: and <2 x i64>
+// CHECK-LE: xor <2 x i64>
+// CHECK-LE: and <2 x i64>
 
   res_vf = vec_floor(vf);
 // CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_floor(vd);
 // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
 
   res_vf = vec_madd(vf, vf, vf);
 // CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
 
   res_vd = vec_madd(vd, vd, vd);
 // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
 
   res_vf = vec_msub(vf, vf, vf);
 // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
 // CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
 
   res_vd = vec_msub(vd, vd, vd);
 // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
 // CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
 
   res_vf = vec_mul(vf, vf);
 // CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
 
   res_vd = vec_mul(vd, vd);
 // CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
 
   res_vf = vec_nearbyint(vf);
 // CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_nearbyint(vd);
 // CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
 
   res_vf = vec_nmadd(vf, vf, vf);
 // CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
 // CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
+// CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
+// CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
 
   res_vd = vec_nmadd(vd, vd, vd);
 // CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
 // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
+// CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
+// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
 
   res_vf = vec_nmsub(vf, vf, vf);
 // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
 // CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
 // CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
+// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
 
   res_vd = vec_nmsub(vd, vd, vd);
 // CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
 // CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
 // CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
+// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
+// CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
+// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
 
   /* vec_nor */
   res_vsll = vec_nor(vsll, vsll);
 // CHECK: or <2 x i64>
 // CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_nor(vull, vull);
 // CHECK: or <2 x i64>
 // CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_nor(vbll, vbll);
 // CHECK: or <2 x i64>
 // CHECK: xor <2 x i64>
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vd = vec_nor(vd, vd);
 // CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
 // CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
+// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
 
   /* vec_or */
   res_vsll = vec_or(vsll, vsll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vsll = vec_or(vbll, vsll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vsll = vec_or(vsll, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_or(vull, vull);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_or(vbll, vull);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_or(vull, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vbll = vec_or(vbll, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vd = vec_or(vd, vd);
 // CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
 // CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
+// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 
   res_vf = vec_rint(vf);
 // CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_rint(vd);
 // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
 
   res_vf = vec_rsqrte(vf);
 // CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_rsqrte(vd);
 // CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vf = vec_sel(vd, vd, vbll);
 // CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
@@ -389,9 +574,15 @@ void test1() {
 // CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK: or <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64> %{{[0-9]+}},
+// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_sel(vd, vd, vull);
 // CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
@@ -399,107 +590,147 @@ void test1() {
 // CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK: or <2 x i64>
 // CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
+// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
+// CHECK-LE: and <2 x i64> %{{[0-9]+}},
+// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: or <2 x i64>
+// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
 
   res_vf = vec_sqrt(vf);
 // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_sqrt(vd);
 // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
 
   res_vd = vec_sub(vd, vd);
 // CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
 
   res_vf = vec_trunc(vf);
 // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
+// CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
 
   res_vd = vec_trunc(vd);
 // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
+// CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
 
   /* vec_vor */
   res_vsll = vec_vor(vsll, vsll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vsll = vec_vor(vbll, vsll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vsll = vec_vor(vsll, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_vor(vull, vull);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_vor(vbll, vull);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vull = vec_vor(vull, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   res_vbll = vec_vor(vbll, vbll);
 // CHECK: or <2 x i64>
+// CHECK-LE: or <2 x i64>
 
   /* vec_xor */
   res_vsll = vec_xor(vsll, vsll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vsll = vec_xor(vbll, vsll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vsll = vec_xor(vsll, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_xor(vull, vull);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_xor(vbll, vull);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_xor(vull, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vbll = vec_xor(vbll, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_xor(vd, vd);
 // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_xor(vd, vbll);
 // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
 
   dummy();
 // CHECK: call void @dummy()
+// CHECK-LE: call void @dummy()
 
   res_vd = vec_xor(vbll, vd);
 // CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
+// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
+// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>
 
   /* vec_vxor */
   res_vsll = vec_vxor(vsll, vsll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vsll = vec_vxor(vbll, vsll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vsll = vec_vxor(vsll, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_vxor(vull, vull);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_vxor(vbll, vull);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vull = vec_vxor(vull, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
   res_vbll = vec_vxor(vbll, vbll);
 // CHECK: xor <2 x i64>
+// CHECK-LE: xor <2 x i64>
 
 }

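One detail the CHECK-LE lines above make visible: on little endian the
vperm-based paths (the new vec_perm overloads and the vec_splat
overloads for doubleword types) first complement the permute control
vector, which is why an extra 'xor <16 x i8>' is expected before the
call to llvm.ppc.altivec.vperm. A small usage sketch, assuming a
POWER8/VSX-enabled compile (hypothetical helper, not part of the patch):

  #include <altivec.h>

  // Hypothetical usage: replicate element 1 of a v2f64 into both lanes.
  // On little endian this goes through vperm with a complemented control
  // vector, matching the extra 'xor <16 x i8>' in the CHECK-LE lines.
  vector double splat_element_one(vector double vd) {
    return vec_splat(vd, 1);
  }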
Added: cfe/trunk/test/Driver/ppc-dependent-options.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Driver/ppc-dependent-options.cpp?rev=241904&view=auto
==============================================================================
--- cfe/trunk/test/Driver/ppc-dependent-options.cpp (added)
+++ cfe/trunk/test/Driver/ppc-dependent-options.cpp Fri Jul 10 08:11:34 2015
@@ -0,0 +1,61 @@
+// REQUIRES: powerpc-registered-target
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 %s 2>&1 | \
+// RUN: FileCheck %s -check-prefix=CHECK-DEFAULT
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mno-vsx -mpower8-vector %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-NVSX-P8V
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mno-vsx -mdirect-move %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-NVSX-DMV
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mno-vsx -mpower8-vector -mvsx %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-DEFAULT
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mno-vsx -mdirect-move -mvsx %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-DEFAULT
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mpower8-vector -mno-vsx %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-NVSX-P8V
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mdirect-move -mno-vsx %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-NVSX-DMV
+
+// RUN: not %clang -fsyntax-only -mcpu=power8 -std=c++11 \
+// RUN: -mno-vsx %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-NVSX
+
+// RUN: not %clang -fsyntax-only -mcpu=power6 -std=c++11 %s 2>&1 | \
+// RUN: FileCheck %s -check-prefix=CHECK-NVSX
+
+// RUN: not %clang -fsyntax-only -mcpu=power6 -std=c++11 \
+// RUN: -mpower8-vector %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-DEFAULT
+
+// RUN: not %clang -fsyntax-only -mcpu=power6 -std=c++11 \
+// RUN: -mdirect-move %s 2>&1 | FileCheck %s \
+// RUN: -check-prefix=CHECK-VSX
+
+#ifdef __VSX__
+static_assert(false, "VSX enabled");
+#endif
+
+#ifdef __POWER8_VECTOR__
+static_assert(false, "P8V enabled");
+#endif
+
+#if !defined(__VSX__) && !defined(__POWER8_VECTOR__)
+static_assert(false, "Neither enabled");
+#endif
+
+// CHECK-DEFAULT: VSX enabled
+// CHECK-DEFAULT: P8V enabled
+// CHECK-NVSX-P8V: error: option '-mpower8-vector' cannot be specified with '-mno-vsx'
+// CHECK-NVSX-DMV: error: option '-mdirect-move' cannot be specified with '-mno-vsx'
+// CHECK-NVSX: Neither enabled
+// CHECK-VSX: VSX enabled
