[llvm] 820f508 - [PowerPC] Removing _massv placeholder

Masoud Ataei via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 8 13:46:29 PST 2021


Author: Masoud Ataei
Date: 2021-03-08T21:43:24Z
New Revision: 820f508b08d7c94b2dd7847e9710d2bc36d3dd45

URL: https://github.com/llvm/llvm-project/commit/820f508b08d7c94b2dd7847e9710d2bc36d3dd45
DIFF: https://github.com/llvm/llvm-project/commit/820f508b08d7c94b2dd7847e9710d2bc36d3dd45.diff

LOG: [PowerPC] Removing _massv placeholder

Since P8 is the oldest machine supported by the MASSV pass, the
_massv placeholder is removed and the oldest (P8) version of the
MASSV functions is assumed by default. If the P9 vector subtarget
feature is detected during compilation, the P8 suffix is updated
to P9.

Differential Revision: https://reviews.llvm.org/D98064
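
To make the renaming concrete, here is a minimal sketch of the suffix
handling this patch enables (a hypothetical free-function version, not
the exact LLVM implementation): with the placeholder gone, every
generic MASSV entry already ends in a two-character CPU suffix, so
retargeting for Power9 is a fixed-width string swap.

#include <string>

// Hypothetical helper mirroring what createMASSVFuncName now has to do:
// "__expf4_P8" becomes "__expf4_P9" when the P9 vector feature is
// present. The suffix is always two characters ("P8"/"P9"), which is
// why MASSVSuffixLength drops from 5 (for "massv") to 2 in the diff below.
std::string createMASSVFuncName(const std::string &GenericName,
                                bool HasP9Vector) {
  const unsigned MASSVSuffixLength = 2;
  std::string Base =
      GenericName.substr(0, GenericName.size() - MASSVSuffixLength);
  return Base + (HasP9Vector ? "P9" : "P8");
}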

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/VecFuncs.def
    llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
    llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
    llvm/test/CodeGen/PowerPC/lower-massv-attr.ll
    llvm/test/CodeGen/PowerPC/lower-massv.ll
    llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
    llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/massv-altivec.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/massv-calls.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/massv-nobuiltin.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/massv-unsupported.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
    llvm/test/Transforms/Util/add-TLI-mappings.ll

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Analysis/VecFuncs.def b/llvm/include/llvm/Analysis/VecFuncs.def
index 1593d03ffdb7..3391afd7d3ba 100644
--- a/llvm/include/llvm/Analysis/VecFuncs.def
+++ b/llvm/include/llvm/Analysis/VecFuncs.def
@@ -150,72 +150,72 @@ TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVdN8v_logf", FIXED(8))
 // IBM MASS library's vector Functions
 
 // Floating-Point Arithmetic and Auxiliary Functions
-TLI_DEFINE_VECFUNC("cbrt", "__cbrtd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("cbrtf", "__cbrtf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("pow", "__powd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.pow.f64", "__powd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("powf", "__powf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.pow.f32", "__powf4_massv", FIXED(4))
+TLI_DEFINE_VECFUNC("cbrt", "__cbrtd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("cbrtf", "__cbrtf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("pow", "__powd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "__powd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("powf", "__powf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "__powf4_P8", FIXED(4))
 
 // Exponential and Logarithmic Functions
-TLI_DEFINE_VECFUNC("exp", "__expd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.exp.f64", "__expd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("expf", "__expf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.exp.f32", "__expf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("exp2", "__exp2d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__exp2d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("exp2f", "__exp2f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__exp2f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("expm1", "__expm1d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("expm1f", "__expm1f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("log", "__logd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.log.f64", "__logd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("logf", "__logf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.log.f32", "__logf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("log1p", "__log1pd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("log1pf", "__log1pf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("log10", "__log10d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.log10.f64", "__log10d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("log10f", "__log10f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.log10.f32", "__log10f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("log2", "__log2d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.log2.f64", "__log2d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("log2f", "__log2f4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.log2.f32", "__log2f4_massv", FIXED(4))
+TLI_DEFINE_VECFUNC("exp", "__expd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "__expd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("expf", "__expf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "__expf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("exp2", "__exp2d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__exp2d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("exp2f", "__exp2f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__exp2f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("expm1", "__expm1d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("expm1f", "__expm1f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("log", "__logd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.log.f64", "__logd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("logf", "__logf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.log.f32", "__logf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("log1p", "__log1pd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("log1pf", "__log1pf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("log10", "__log10d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.log10.f64", "__log10d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("log10f", "__log10f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "__log10f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("log2", "__log2d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "__log2d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("log2f", "__log2f4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "__log2f4_P8", FIXED(4))
 
 // Trigonometric Functions
-TLI_DEFINE_VECFUNC("sin", "__sind2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.sin.f64", "__sind2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("sinf", "__sinf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.sin.f32", "__sinf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("cos", "__cosd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("llvm.cos.f64", "__cosd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("cosf", "__cosf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("llvm.cos.f32", "__cosf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("tan", "__tand2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("tanf", "__tanf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("asin", "__asind2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("asinf", "__asinf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("acos", "__acosd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("acosf", "__acosf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("atan", "__atand2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("atanf", "__atanf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("atan2", "__atan2d2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("atan2f", "__atan2f4_massv", FIXED(4))
+TLI_DEFINE_VECFUNC("sin", "__sind2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "__sind2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("sinf", "__sinf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "__sinf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("cos", "__cosd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "__cosd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("cosf", "__cosf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "__cosf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("tan", "__tand2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("tanf", "__tanf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("asin", "__asind2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("asinf", "__asinf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("acos", "__acosd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("acosf", "__acosf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("atan", "__atand2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("atanf", "__atanf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("atan2", "__atan2d2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("atan2f", "__atan2f4_P8", FIXED(4))
 
 // Hyperbolic Functions
-TLI_DEFINE_VECFUNC("sinh", "__sinhd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("sinhf", "__sinhf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("cosh", "__coshd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("coshf", "__coshf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("tanh", "__tanhd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("tanhf", "__tanhf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("asinh", "__asinhd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("asinhf", "__asinhf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("acosh", "__acoshd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("acoshf", "__acoshf4_massv", FIXED(4))
-TLI_DEFINE_VECFUNC("atanh", "__atanhd2_massv", FIXED(2))
-TLI_DEFINE_VECFUNC("atanhf", "__atanhf4_massv", FIXED(4))
+TLI_DEFINE_VECFUNC("sinh", "__sinhd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("sinhf", "__sinhf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("cosh", "__coshd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("coshf", "__coshf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("tanh", "__tanhd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("tanhf", "__tanhf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("asinh", "__asinhd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("asinhf", "__asinhf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("acosh", "__acoshd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("acoshf", "__acoshf4_P8", FIXED(4))
+TLI_DEFINE_VECFUNC("atanh", "__atanhd2_P8", FIXED(2))
+TLI_DEFINE_VECFUNC("atanhf", "__atanhf4_P8", FIXED(4))
 
 
 #elif defined(TLI_DEFINE_SVML_VECFUNCS)
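
For context, a simplified sketch (an assumed shape, not the actual LLVM
sources) of what each TLI_DEFINE_VECFUNC row above amounts to: a
scalar-to-vector name mapping at a fixed width, which the vectorizer
queries when the MASSV vector library is selected.

#include <cstring>

struct VecDesc {
  const char *ScalarName; // e.g. "expf" or "llvm.exp.f32"
  const char *VectorName; // e.g. "__expf4_P8" (previously "__expf4_massv")
  unsigned VectorWidth;   // lanes: 2 for f64 entries, 4 for f32 entries
};

static const VecDesc MASSVFuncs[] = {
    {"expf", "__expf4_P8", 4},
    {"llvm.exp.f32", "__expf4_P8", 4},
    {"pow", "__powd2_P8", 2},
};

// Returns the vector counterpart for a scalar call, or nullptr if the
// function is not vectorizable at the requested width.
const char *getVectorizedFunction(const char *ScalarName, unsigned Width) {
  for (const VecDesc &D : MASSVFuncs)
    if (D.VectorWidth == Width && std::strcmp(D.ScalarName, ScalarName) == 0)
      return D.VectorName;
  return nullptr;
}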

diff  --git a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
index e71aad998b42..d16bdeaf365d 100644
--- a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
 namespace {
 
 // Length of the suffix "massv", which is specific to IBM MASSV library entries.
-const unsigned MASSVSuffixLength = 5;
+const unsigned MASSVSuffixLength = 2;
 
 static StringRef MASSVFuncs[] = {
 #define TLI_DEFINE_MASSV_VECFUNCS_NAMES
@@ -101,7 +101,7 @@ PPCLowerMASSVEntries::createMASSVFuncName(Function &Func,
 /// intrinsics when the exponent is 0.25 or 0.75.
 bool PPCLowerMASSVEntries::handlePowSpecialCases(CallInst *CI, Function &Func,
                                                  Module &M) {
-  if (Func.getName() != "__powf4_massv" && Func.getName() != "__powd2_massv")
+  if (Func.getName() != "__powf4_P8" && Func.getName() != "__powd2_P8")
     return false;
 
   if (Constant *Exp = dyn_cast<Constant>(CI->getArgOperand(1)))
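
The guard above only has to match the renamed entries; the optimization
itself is unchanged. As a rough sketch of the math being exploited
(standard identities, shown in scalar form rather than the pass's
literal vector-intrinsic rewrite): a constant exponent of 0.25 or 0.75
lets the pow call be replaced with square roots.

#include <cmath>

// x^0.25 and x^0.75 rewritten with sqrt; the pass applies the same idea
// to the <2 x double>/<4 x float> calls using sqrt intrinsics.
double powConstExp(double X, double Exp) {
  if (Exp == 0.25)
    return std::sqrt(std::sqrt(X));                // x^(1/4)
  if (Exp == 0.75)
    return std::sqrt(X) * std::sqrt(std::sqrt(X)); // x^(1/2) * x^(1/4)
  return std::pow(X, Exp); // general case: keep the library call
}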

diff  --git a/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll b/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
index d0b31f322f44..f6e64a40f4ec 100644
--- a/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
+++ b/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll
@@ -47,7 +47,7 @@ define <4 x float> @exp_f32(<4 x float> %in) {
 ;
 ; MASSV-LABEL: define {{[^@]+}}@exp_f32
 ; MASSV-SAME: (<4 x float> [[IN:%.*]]) {
-; MASSV-NEXT:    [[TMP1:%.*]] = call <4 x float> @__expf4_massv(<4 x float> [[IN]])
+; MASSV-NEXT:    [[TMP1:%.*]] = call <4 x float> @__expf4_P8(<4 x float> [[IN]])
 ; MASSV-NEXT:    ret <4 x float> [[TMP1]]
 ;
 ; ACCELERATE-LABEL: define {{[^@]+}}@exp_f32

diff  --git a/llvm/test/CodeGen/PowerPC/lower-massv-attr.ll b/llvm/test/CodeGen/PowerPC/lower-massv-attr.ll
index 65e178431199..bcaf2913a845 100644
--- a/llvm/test/CodeGen/PowerPC/lower-massv-attr.ll
+++ b/llvm/test/CodeGen/PowerPC/lower-massv-attr.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr9  < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=-power9-vector | FileCheck -check-prefixes=CHECK-PWR8,CHECK-ALL %s 
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8  < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+power9-vector | FileCheck -check-prefixes=CHECK-PWR9,CHECK-ALL %s 
 
-declare <2 x double> @__cbrtd2_massv(<2 x double>)
-declare <4 x float> @__cbrtf4_massv(<4 x float>)
+declare <2 x double> @__cbrtd2_P8(<2 x double>)
+declare <4 x float> @__cbrtf4_P8(<4 x float>)
 
 ; cbrt without the power9-vector attribute on the caller
 ; check massv calls are correctly targeted for Power8
@@ -12,7 +12,7 @@ define <2 x double>  @cbrt_f64_massv_nopwr9(<2 x double> %opnd) #0 {
 ; CHECK-NOT: bl __cbrtd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__cbrtd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__cbrtd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -22,8 +22,9 @@ define <2 x double>  @cbrt_f64_massv_pwr9(<2 x double> %opnd) #1 {
 ; CHECK-ALL-LABEL: @cbrt_f64_massv_pwr9
 ; CHECK-PWR9: bl __cbrtd2_P9
 ; CHECK-NOT: bl __cbrtd2_massv
+; CHECK-NOT: bl __cbrtd2_P8
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__cbrtd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__cbrtd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
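
This test pins down that the P8/P9 choice is driven by the calling
function's own subtarget features rather than the global -mcpu. A
hedged sketch of that query (the real pass goes through the PPC
subtarget; the attribute string below is one way the feature surfaces
in IR):

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"

// Hypothetical standalone check: a function compiled with
// -mattr=+power9-vector carries "+power9-vector" in its
// "target-features" attribute, so its MASSV calls get the P9 suffix.
static bool callerHasP9Vector(const llvm::Function &F) {
  llvm::StringRef Feats =
      F.getFnAttribute("target-features").getValueAsString();
  return Feats.contains("+power9-vector");
}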

diff  --git a/llvm/test/CodeGen/PowerPC/lower-massv.ll b/llvm/test/CodeGen/PowerPC/lower-massv.ll
index 301aa7b94e66..3ecb3be41ad1 100644
--- a/llvm/test/CodeGen/PowerPC/lower-massv.ll
+++ b/llvm/test/CodeGen/PowerPC/lower-massv.ll
@@ -2,71 +2,71 @@
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8  < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck -check-prefixes=CHECK-PWR8,CHECK-ALL %s 
 ; RUN: llc -verify-machineinstrs -mcpu=pwr8  < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck --check-prefix=CHECK-ALL %s 
 
-declare <2 x double> @__cbrtd2_massv(<2 x double>)
-declare <4 x float> @__cbrtf4_massv(<4 x float>)
+declare <2 x double> @__cbrtd2_P8(<2 x double>)
+declare <4 x float> @__cbrtf4_P8(<4 x float>)
 
-declare <2 x double> @__powd2_massv(<2 x double>, <2 x double>)
-declare <4 x float> @__powf4_massv(<4 x float>, <4 x float>)
+declare <2 x double> @__powd2_P8(<2 x double>, <2 x double>)
+declare <4 x float> @__powf4_P8(<4 x float>, <4 x float>)
 
-declare <2 x double> @__expd2_massv(<2 x double>)
-declare <4 x float> @__expf4_massv(<4 x float>)
+declare <2 x double> @__expd2_P8(<2 x double>)
+declare <4 x float> @__expf4_P8(<4 x float>)
 
-declare <2 x double> @__exp2d2_massv(<2 x double>)
-declare <4 x float> @__exp2f4_massv(<4 x float>)
+declare <2 x double> @__exp2d2_P8(<2 x double>)
+declare <4 x float> @__exp2f4_P8(<4 x float>)
 
-declare <2 x double> @__expm1d2_massv(<2 x double>)
-declare <4 x float> @__expm1f4_massv(<4 x float>)
+declare <2 x double> @__expm1d2_P8(<2 x double>)
+declare <4 x float> @__expm1f4_P8(<4 x float>)
 
-declare <2 x double> @__logd2_massv(<2 x double>)
-declare <4 x float> @__logf4_massv(<4 x float>)
+declare <2 x double> @__logd2_P8(<2 x double>)
+declare <4 x float> @__logf4_P8(<4 x float>)
 
-declare <2 x double> @__log1pd2_massv(<2 x double>)
-declare <4 x float> @__log1pf4_massv(<4 x float>)
+declare <2 x double> @__log1pd2_P8(<2 x double>)
+declare <4 x float> @__log1pf4_P8(<4 x float>)
 
-declare <2 x double> @__log10d2_massv(<2 x double>)
-declare <4 x float> @__log10f4_massv(<4 x float>)
+declare <2 x double> @__log10d2_P8(<2 x double>)
+declare <4 x float> @__log10f4_P8(<4 x float>)
 
-declare <2 x double> @__log2d2_massv(<2 x double>)
-declare <4 x float> @__log2f4_massv(<4 x float>)
+declare <2 x double> @__log2d2_P8(<2 x double>)
+declare <4 x float> @__log2f4_P8(<4 x float>)
 
-declare <2 x double> @__sind2_massv(<2 x double>)
-declare <4 x float> @__sinf4_massv(<4 x float>)
+declare <2 x double> @__sind2_P8(<2 x double>)
+declare <4 x float> @__sinf4_P8(<4 x float>)
 
-declare <2 x double> @__cosd2_massv(<2 x double>)
-declare <4 x float> @__cosf4_massv(<4 x float>)
+declare <2 x double> @__cosd2_P8(<2 x double>)
+declare <4 x float> @__cosf4_P8(<4 x float>)
 
-declare <2 x double> @__tand2_massv(<2 x double>)
-declare <4 x float> @__tanf4_massv(<4 x float>)
+declare <2 x double> @__tand2_P8(<2 x double>)
+declare <4 x float> @__tanf4_P8(<4 x float>)
 
-declare <2 x double> @__asind2_massv(<2 x double>)
-declare <4 x float> @__asinf4_massv(<4 x float>)
+declare <2 x double> @__asind2_P8(<2 x double>)
+declare <4 x float> @__asinf4_P8(<4 x float>)
 
-declare <2 x double> @__acosd2_massv(<2 x double>)
-declare <4 x float> @__acosf4_massv(<4 x float>)
+declare <2 x double> @__acosd2_P8(<2 x double>)
+declare <4 x float> @__acosf4_P8(<4 x float>)
 
-declare <2 x double> @__atand2_massv(<2 x double>)
-declare <4 x float> @__atanf4_massv(<4 x float>)
+declare <2 x double> @__atand2_P8(<2 x double>)
+declare <4 x float> @__atanf4_P8(<4 x float>)
 
-declare <2 x double> @__atan2d2_massv(<2 x double>)
-declare <4 x float> @__atan2f4_massv(<4 x float>)
+declare <2 x double> @__atan2d2_P8(<2 x double>)
+declare <4 x float> @__atan2f4_P8(<4 x float>)
 
-declare <2 x double> @__sinhd2_massv(<2 x double>)
-declare <4 x float> @__sinhf4_massv(<4 x float>)
+declare <2 x double> @__sinhd2_P8(<2 x double>)
+declare <4 x float> @__sinhf4_P8(<4 x float>)
 
-declare <2 x double> @__coshd2_massv(<2 x double>)
-declare <4 x float> @__coshf4_massv(<4 x float>)
+declare <2 x double> @__coshd2_P8(<2 x double>)
+declare <4 x float> @__coshf4_P8(<4 x float>)
 
-declare <2 x double> @__tanhd2_massv(<2 x double>)
-declare <4 x float> @__tanhf4_massv(<4 x float>)
+declare <2 x double> @__tanhd2_P8(<2 x double>)
+declare <4 x float> @__tanhf4_P8(<4 x float>)
 
-declare <2 x double> @__asinhd2_massv(<2 x double>)
-declare <4 x float> @__asinhf4_massv(<4 x float>)
+declare <2 x double> @__asinhd2_P8(<2 x double>)
+declare <4 x float> @__asinhf4_P8(<4 x float>)
 
-declare <2 x double> @__acoshd2_massv(<2 x double>)
-declare <4 x float> @__acoshf4_massv(<4 x float>)
+declare <2 x double> @__acoshd2_P8(<2 x double>)
+declare <4 x float> @__acoshf4_P8(<4 x float>)
 
-declare <2 x double> @__atanhd2_massv(<2 x double>)
-declare <4 x float> @__atanhf4_massv(<4 x float>)
+declare <2 x double> @__atanhd2_P8(<2 x double>)
+declare <4 x float> @__atanhf4_P8(<4 x float>)
 
 ; following tests check generation of subtarget-specific calls
 ; cbrt
@@ -77,7 +77,7 @@ define <2 x double>  @cbrt_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __cbrtd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__cbrtd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__cbrtd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -88,7 +88,7 @@ define <4 x float>  @cbrt_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __cbrtf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__cbrtf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__cbrtf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -100,7 +100,7 @@ define <2 x double>  @pow_f64_massv(<2 x double> %opnd1, <2 x double> %opnd2) {
 ; CHECK-NOT: bl __powd2_massv
 ; CHECK-ALL: blr
 ;
- %1 = call <2 x double> @__powd2_massv(<2 x double> %opnd1, <2 x double> %opnd2)
+ %1 = call <2 x double> @__powd2_P8(<2 x double> %opnd1, <2 x double> %opnd2)
   ret <2 x double> %1 
 }
 
@@ -111,7 +111,7 @@ define <4 x float>  @pow_f32_massv(<4 x float> %opnd1, <4 x float> %opnd2) {
 ; CHECK-NOT: bl __powf4_massv
 ; CHECK-ALL: blr
 ;
- %1 = call <4 x float> @__powf4_massv(<4 x float> %opnd1, <4 x float> %opnd2)
+ %1 = call <4 x float> @__powf4_P8(<4 x float> %opnd1, <4 x float> %opnd2)
   ret <4 x float> %1 
 }
 
@@ -123,7 +123,7 @@ define <2 x double>  @exp_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __expd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__expd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__expd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -134,7 +134,7 @@ define <4 x float>  @exp_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __expf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__expf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__expf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -146,7 +146,7 @@ define <2 x double>  @exp2_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __exp2d2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__exp2d2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__exp2d2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -157,7 +157,7 @@ define <4 x float>  @exp2_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __exp2f4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__exp2f4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__exp2f4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -169,7 +169,7 @@ define <2 x double>  @expm1_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __expm1d2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__expm1d2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__expm1d2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -180,7 +180,7 @@ define <4 x float>  @expm1_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __expm1f4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__expm1f4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__expm1f4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -192,7 +192,7 @@ define <2 x double>  @log_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __logd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__logd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__logd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -203,7 +203,7 @@ define <4 x float>  @log_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __logf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__logf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__logf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -215,7 +215,7 @@ define <2 x double>  @log1p_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __log1pd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__log1pd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__log1pd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -226,7 +226,7 @@ define <4 x float>  @log1p_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __log1pf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__log1pf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__log1pf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -238,7 +238,7 @@ define <2 x double>  @log10_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __log10d2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__log10d2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__log10d2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -249,7 +249,7 @@ define <4 x float>  @log10_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __log10f4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__log10f4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__log10f4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -261,7 +261,7 @@ define <2 x double>  @log2_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __log2d2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__log2d2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__log2d2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -272,7 +272,7 @@ define <4 x float>  @log2_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __log2f4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__log2f4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__log2f4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -284,7 +284,7 @@ define <2 x double>  @sin_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __sind2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__sind2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__sind2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -295,7 +295,7 @@ define <4 x float>  @sin_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __sinf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__sinf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__sinf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -307,7 +307,7 @@ define <2 x double>  @cos_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __cosd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__cosd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__cosd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -318,7 +318,7 @@ define <4 x float>  @cos_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __cosf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__cosf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__cosf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -330,7 +330,7 @@ define <2 x double>  @tan_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __tand2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__tand2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__tand2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -341,7 +341,7 @@ define <4 x float>  @tan_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __tanf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__tanf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__tanf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -353,7 +353,7 @@ define <2 x double>  @asin_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __asind2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__asind2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__asind2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -364,7 +364,7 @@ define <4 x float>  @asin_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __asinf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__asinf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__asinf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -376,7 +376,7 @@ define <2 x double>  @acos_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __acosd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__acosd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__acosd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -387,7 +387,7 @@ define <4 x float>  @acos_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __acosf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__acosf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__acosf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -399,7 +399,7 @@ define <2 x double>  @atan_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __atand2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__atand2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__atand2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -410,7 +410,7 @@ define <4 x float>  @atan_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __atanf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__atanf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__atanf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -422,7 +422,7 @@ define <2 x double>  @atan2_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __atan2d2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__atan2d2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__atan2d2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -433,7 +433,7 @@ define <4 x float>  @atan2_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __atan2f4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__atan2f4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__atan2f4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -445,7 +445,7 @@ define <2 x double>  @sinh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __sinhd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__sinhd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__sinhd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -456,7 +456,7 @@ define <4 x float>  @sinh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __sinhf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__sinhf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__sinhf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -468,7 +468,7 @@ define <2 x double>  @cosh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __coshd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__coshd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__coshd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -479,7 +479,7 @@ define <4 x float>  @cosh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __coshf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__coshf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__coshf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -491,7 +491,7 @@ define <2 x double>  @tanh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __tanhd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__tanhd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__tanhd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -502,7 +502,7 @@ define <4 x float>  @tanh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __tanhf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__tanhf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__tanhf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -514,7 +514,7 @@ define <2 x double>  @asinh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __asinhd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__asinhd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__asinhd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -525,7 +525,7 @@ define <4 x float>  @asinh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __asinhf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__asinhf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__asinhf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -537,7 +537,7 @@ define <2 x double>  @acosh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __acoshd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__acoshd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__acoshd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -548,7 +548,7 @@ define <4 x float>  @acosh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __acoshf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__acoshf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__acoshf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 
@@ -560,7 +560,7 @@ define <2 x double>  @atanh_f64_massv(<2 x double> %opnd) {
 ; CHECK-NOT: bl __atanhd2_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <2 x double> @__atanhd2_massv(<2 x double> %opnd)
+  %1 = call <2 x double> @__atanhd2_P8(<2 x double> %opnd)
   ret <2 x double> %1 
 }
 
@@ -571,7 +571,7 @@ define <4 x float>  @atanh_f32_massv(<4 x float> %opnd) {
 ; CHECK-NOT: bl __atanhf4_massv
 ; CHECK-ALL: blr
 ;
-  %1 = call <4 x float> @__atanhf4_massv(<4 x float> %opnd)
+  %1 = call <4 x float> @__atanhf4_P8(<4 x float> %opnd)
   ret <4 x float> %1 
 }
 

diff  --git a/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll b/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
index f4c3a149bfcd..47aaa67c9315 100644
--- a/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
+++ b/llvm/test/CodeGen/PowerPC/pow_massv_075_025exp.ll
@@ -19,7 +19,7 @@ vector.body:
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
   %1 = bitcast double* %next.gep31 to <2 x double>*
   %wide.load33 = load <2 x double>, <2 x double>* %1, align 8
-  %2 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> %wide.load33)
+  %2 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> %wide.load33)
   %3 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %2, <2 x double>* %3, align 8
   %index.next = add i64 %index, 2
@@ -45,7 +45,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
+  %1 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.600000e-01, double 7.600000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -71,7 +71,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
+  %1 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.600000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -97,7 +97,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
+  %1 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 7.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -123,7 +123,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
+  %1 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.700000e-01, double 2.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -149,7 +149,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+  %1 = call ninf afn <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -175,7 +175,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call ninf afn nsz <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+  %1 = call ninf afn nsz <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -202,7 +202,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
+  %1 = call <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 7.500000e-01, double 7.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -229,7 +229,7 @@ vector.body:
   %next.gep19 = getelementptr double, double* %x, i64 %index
   %0 = bitcast double* %next.gep19 to <2 x double>*
   %wide.load = load <2 x double>, <2 x double>* %0, align 8
-  %1 = call <2 x double> @__powd2_massv(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
+  %1 = call <2 x double> @__powd2_P8(<2 x double> %wide.load, <2 x double> <double 2.500000e-01, double 2.500000e-01>)
   %2 = bitcast double* %next.gep to <2 x double>*
   store <2 x double> %1, <2 x double>* %2, align 8
   %index.next = add i64 %index, 2
@@ -241,4 +241,4 @@ for.end:
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare <2 x double> @__powd2_massv(<2 x double>, <2 x double>) #1
+declare <2 x double> @__powd2_P8(<2 x double>, <2 x double>) #1

diff  --git a/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll b/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
index caa247acc2b6..1231da96456d 100644
--- a/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
+++ b/llvm/test/CodeGen/PowerPC/powf_massv_075_025exp.ll
@@ -19,7 +19,7 @@ vector.body:
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
   %1 = bitcast float* %next.gep31 to <4 x float>*
   %wide.load33 = load <4 x float>, <4 x float>* %1, align 4
-  %2 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> %wide.load33)
+  %2 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> %wide.load33)
   %3 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %2, <4 x float>* %3, align 4
   %index.next = add i64 %index, 4
@@ -45,7 +45,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+  %1 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -71,7 +71,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
+  %1 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 0x3FE861EB80000000, float 0x3FE871EB80000000, float 0x3FE851EB80000000, float 0x3FE851EB80000000>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -97,7 +97,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
+  %1 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 0x3FE851EB80000000>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -123,7 +123,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
+  %1 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 0x3FE851EB80000000, float 2.500000e-01, float 0x3FE851EB80000000, float 2.500000e-01>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -149,7 +149,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+  %1 = call ninf afn <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -175,7 +175,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call ninf afn nsz <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+  %1 = call ninf afn nsz <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -202,7 +202,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
+  %1 = call <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 7.500000e-01, float 7.500000e-01, float 7.500000e-01, float 7.500000e-01>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -229,7 +229,7 @@ vector.body:
   %next.gep19 = getelementptr float, float* %x, i64 %index
   %0 = bitcast float* %next.gep19 to <4 x float>*
   %wide.load = load <4 x float>, <4 x float>* %0, align 4
-  %1 = call <4 x float> @__powf4_massv(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
+  %1 = call <4 x float> @__powf4_P8(<4 x float> %wide.load, <4 x float> <float 2.500000e-01, float 2.500000e-01, float 2.500000e-01, float 2.500000e-01>)
   %2 = bitcast float* %next.gep to <4 x float>*
   store <4 x float> %1, <4 x float>* %2, align 4
   %index.next = add i64 %index, 4
@@ -241,4 +241,4 @@ for.end:
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare <4 x float> @__powf4_massv(<4 x float>, <4 x float>)
+declare <4 x float> @__powf4_P8(<4 x float>, <4 x float>)

diff  --git a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-altivec.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-altivec.ll
index df2aa99f2d4b..dd60af19554f 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-altivec.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-altivec.ll
@@ -13,7 +13,7 @@ declare float @atanhf(float) #0
 ; Check that massv entries are not generated.
 define void @cbrt_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @cbrt_f64(
-; CHECK-NOT: __cbrtd2_massv{{.*}}<2 x double>
+; CHECK-NOT: __cbrtd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -36,7 +36,7 @@ for.end:
 
 define void @cbrt_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @cbrt_f32(
-; CHECK-NOT: __cbrtf4_massv{{.*}}<4 x float>
+; CHECK-NOT: __cbrtf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -59,7 +59,7 @@ for.end:
 
 define void @atanh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f64(
-; CHECK-NOT: __atanhd2_massv{{.*}}<2 x double>
+; CHECK-NOT: __atanhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -82,7 +82,7 @@ for.end:
 
 define void @atanh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f32(
-; CHECK-NOT: __atanhf4_massv{{.*}}<2 x double>
+; CHECK-NOT: __atanhf4_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:

diff  --git a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-calls.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-calls.ll
index 8e48d760e482..59da19592c66 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-calls.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-calls.ll
@@ -90,7 +90,7 @@ declare float @atanhf(float) #0
 
 define void @cbrt_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @cbrt_f64(
-; CHECK: __cbrtd2_massv{{.*}}<2 x double>
+; CHECK: __cbrtd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -113,7 +113,7 @@ for.end:
 
 define void @cbrt_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @cbrt_f32(
-; CHECK: __cbrtf4_massv{{.*}}<4 x float>
+; CHECK: __cbrtf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -136,7 +136,7 @@ for.end:
 
 define void @pow_f64(double* nocapture %varray, double* nocapture readonly %exp) {
 ; CHECK-LABEL: @pow_f64(
-; CHECK:  __powd2_massv{{.*}}<2 x double>
+; CHECK:  __powd2_P8{{.*}}<2 x double>
 ; CHECK:  ret void
 ;
 entry:
@@ -161,7 +161,7 @@ for.end:
 
 define void @pow_f64_intrinsic(double* nocapture %varray, double* nocapture readonly %exp) {
 ; CHECK-LABEL: @pow_f64_intrinsic(
-; CHECK: __powd2_massv{{.*}}<2 x double>
+; CHECK: __powd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -186,7 +186,7 @@ for.end:
 
 define void @pow_f32(float* nocapture %varray, float* nocapture readonly %exp) {
 ; CHECK-LABEL: @pow_f32(
-; CHECK: __powf4_massv{{.*}}<4 x float>
+; CHECK: __powf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -211,7 +211,7 @@ for.end:
 
 define void @pow_f32_intrinsic(float* nocapture %varray, float* nocapture readonly %exp) {
 ; CHECK-LABEL: @pow_f32_intrinsic(
-; CHECK: __powf4_massv{{.*}}<4 x float>
+; CHECK: __powf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -236,7 +236,7 @@ for.end:
 
 define void @sqrt_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @sqrt_f64(
-; CHECK-NOT: __sqrtd2_massv{{.*}}<2 x double>
+; CHECK-NOT: __sqrtd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -259,7 +259,7 @@ for.end:
 
 define void @sqrt_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @sqrt_f32(
-; CHECK-NOT: __sqrtf4_massv{{.*}}<4 x float>
+; CHECK-NOT: __sqrtf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -282,7 +282,7 @@ for.end:
 
 define void @exp_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @exp_f64(
-; CHECK: __expd2_massv{{.*}}<2 x double>
+; CHECK: __expd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -305,7 +305,7 @@ for.end:
 
 define void @exp_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @exp_f64_intrinsic(
-; CHECK: __expd2_massv{{.*}}<2 x double>
+; CHECK: __expd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -328,7 +328,7 @@ for.end:
 
 define void @exp_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @exp_f32(
-; CHECK: __expf4_massv{{.*}}<4 x float>
+; CHECK: __expf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -351,7 +351,7 @@ for.end:
 
 define void @exp_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @exp_f32_intrinsic(
-; CHECK: __expf4_massv{{.*}}<4 x float>
+; CHECK: __expf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -374,7 +374,7 @@ for.end:
 
 define void @exp2_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @exp2_f64(
-; CHECK: __exp2d2_massv{{.*}}<2 x double>
+; CHECK: __exp2d2_P8{{.*}}<2 x double>
 ; CHECK:  ret void
 ;
 entry:
@@ -397,7 +397,7 @@ for.end:
 
 define void @exp2_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @exp2_f64_intrinsic(
-; CHECK: __exp2d2_massv{{.*}}<2 x double>
+; CHECK: __exp2d2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -420,7 +420,7 @@ for.end:
 
 define void @exp2_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @exp2_f32(
-; CHECK: __exp2f4_massv{{.*}}<4 x float>
+; CHECK: __exp2f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -443,7 +443,7 @@ for.end:
 
 define void @exp2_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @exp2_f32_intrinsic(
-; CHECK: __exp2f4_massv{{.*}}<4 x float>
+; CHECK: __exp2f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -466,7 +466,7 @@ for.end:
 
 define void @expm1_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @expm1_f64(
-; CHECK: __expm1d2_massv{{.*}}<2 x double>
+; CHECK: __expm1d2_P8{{.*}}<2 x double>
 ; CHECK:  ret void
 ;
 entry:
@@ -489,7 +489,7 @@ for.end:
 
 define void @expm1_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @expm1_f32(
-; CHECK: __expm1f4_massv{{.*}}<4 x float>
+; CHECK: __expm1f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -512,7 +512,7 @@ for.end:
 
 define void @log_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @log_f64(
-; CHECK: __logd2_massv{{.*}}<2 x double>
+; CHECK: __logd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -535,7 +535,7 @@ for.end:
 
 define void @log_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @log_f64_intrinsic(
-; CHECK: __logd2_massv{{.*}}<2 x double>
+; CHECK: __logd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -558,7 +558,7 @@ for.end:
 
 define void @log_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @log_f32(
-; CHECK: __logf4_massv{{.*}}<4 x float>
+; CHECK: __logf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -581,7 +581,7 @@ for.end:
 
 define void @log_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @log_f32_intrinsic(
-; CHECK: __logf4_massv{{.*}}<4 x float>
+; CHECK: __logf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -604,7 +604,7 @@ for.end:
 
 define void @log1p_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @log1p_f64(
-; CHECK: __log1pd2_massv{{.*}}<2 x double>
+; CHECK: __log1pd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -627,7 +627,7 @@ for.end:
 
 define void @log1p_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @log1p_f32(
-; CHECK: __log1pf4_massv{{.*}}<4 x float>
+; CHECK: __log1pf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -650,7 +650,7 @@ for.end:
 
 define void @log10_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @log10_f64(
-; CHECK: __log10d2_massv(<2 x double>
+; CHECK: __log10d2_P8(<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -673,7 +673,7 @@ for.end:
 
 define void @log10_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @log10_f64_intrinsic(
-; CHECK: __log10d2_massv{{.*}}<2 x double>
+; CHECK: __log10d2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -696,7 +696,7 @@ for.end:
 
 define void @log10_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @log10_f32(
-; CHECK: __log10f4_massv{{.*}}<4 x float>
+; CHECK: __log10f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -719,7 +719,7 @@ for.end:
 
 define void @log10_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @log10_f32_intrinsic(
-; CHECK: __log10f4_massv{{.*}}<4 x float>
+; CHECK: __log10f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -742,7 +742,7 @@ for.end:
 
 define void @log2_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @log2_f64(
-; CHECK: __log2d2_massv(<2 x double>
+; CHECK: __log2d2_P8(<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -765,7 +765,7 @@ for.end:
 
 define void @log2_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @log2_f64_intrinsic(
-; CHECK: __log2d2_massv{{.*}}<2 x double>
+; CHECK: __log2d2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -788,7 +788,7 @@ for.end:
 
 define void @log2_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @log2_f32(
-; CHECK: __log2f4_massv{{.*}}<4 x float>
+; CHECK: __log2f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -811,7 +811,7 @@ for.end:
 
 define void @log2_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @log2_f32_intrinsic(
-; CHECK: __log2f4_massv{{.*}}<4 x float>
+; CHECK: __log2f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -834,7 +834,7 @@ for.end:
 
 define void @sin_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @sin_f64(
-; CHECK: __sind2_massv{{.*}}<2 x double>
+; CHECK: __sind2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -857,7 +857,7 @@ for.end:
 
 define void @sin_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @sin_f64_intrinsic(
-; CHECK: __sind2_massv{{.*}}<2 x double>
+; CHECK: __sind2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -880,7 +880,7 @@ for.end:
 
 define void @sin_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @sin_f32(
-; CHECK: __sinf4_massv{{.*}}<4 x float>
+; CHECK: __sinf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -903,7 +903,7 @@ for.end:
 
 define void @sin_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @sin_f32_intrinsic(
-; CHECK: __sinf4_massv{{.*}}<4 x float>
+; CHECK: __sinf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -926,7 +926,7 @@ for.end:
 
 define void @cos_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @cos_f64(
-; CHECK: __cosd2_massv{{.*}}<2 x double>
+; CHECK: __cosd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -949,7 +949,7 @@ for.end:
 
 define void @cos_f64_intrinsic(double* nocapture %varray) {
 ; CHECK-LABEL: @cos_f64_intrinsic(
-; CHECK:    [[TMP5:%.*]] = call <2 x double> @__cosd2_massv(<2 x double> [[TMP4:%.*]])
+; CHECK:    [[TMP5:%.*]] = call <2 x double> @__cosd2_P8(<2 x double> [[TMP4:%.*]])
 ; CHECK:    ret void
 ;
 entry:
@@ -972,7 +972,7 @@ for.end:
 
 define void @cos_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @cos_f32(
-; CHECK: __cosf4_massv{{.*}}<4 x float>
+; CHECK: __cosf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -995,7 +995,7 @@ for.end:
 
 define void @cos_f32_intrinsic(float* nocapture %varray) {
 ; CHECK-LABEL: @cos_f32_intrinsic(
-; CHECK: __cosf4_massv{{.*}}<4 x float>
+; CHECK: __cosf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1018,7 +1018,7 @@ for.end:
 
 define void @tan_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @tan_f64(
-; CHECK: __tand2_massv{{.*}}<2 x double>
+; CHECK: __tand2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1041,7 +1041,7 @@ for.end:
 
 define void @tan_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @tan_f32(
-; CHECK: __tanf4_massv{{.*}}<4 x float>
+; CHECK: __tanf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1064,7 +1064,7 @@ for.end:
 
 define void @asin_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @asin_f64(
-; CHECK: __asind2_massv{{.*}}<2 x double>
+; CHECK: __asind2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1087,7 +1087,7 @@ for.end:
 
 define void @asin_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @asin_f32(
-; CHECK: __asinf4_massv{{.*}}<4 x float>
+; CHECK: __asinf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1110,7 +1110,7 @@ for.end:
 
 define void @acos_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @acos_f64(
-; CHECK: __acosd2_massv{{.*}}<2 x double>
+; CHECK: __acosd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1133,7 +1133,7 @@ for.end:
 
 define void @acos_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @acos_f32(
-; CHECK: __acosf4_massv{{.*}}<4 x float>
+; CHECK: __acosf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1156,7 +1156,7 @@ for.end:
 
 define void @atan_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @atan_f64(
-; CHECK: __atand2_massv{{.*}}<2 x double>
+; CHECK: __atand2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1179,7 +1179,7 @@ for.end:
 
 define void @atan_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @atan_f32(
-; CHECK: __atanf4_massv{{.*}}<4 x float>
+; CHECK: __atanf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1202,7 +1202,7 @@ for.end:
 
 define void @atan2_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @atan2_f64(
-; CHECK: __atan2d2_massv{{.*}}<2 x double>
+; CHECK: __atan2d2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1225,7 +1225,7 @@ for.end:
 
 define void @atan2_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @atan2_f32(
-; CHECK: __atan2f4_massv{{.*}}<4 x float>
+; CHECK: __atan2f4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1248,7 +1248,7 @@ for.end:
 
 define void @sinh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @sinh_f64(
-; CHECK: __sinhd2_massv{{.*}}<2 x double>
+; CHECK: __sinhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1271,7 +1271,7 @@ for.end:
 
 define void @sinh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @sinh_f32(
-; CHECK: __sinhf4_massv{{.*}}<4 x float>
+; CHECK: __sinhf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1294,7 +1294,7 @@ for.end:
 
 define void @cosh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @cosh_f64(
-; CHECK: __coshd2_massv{{.*}}<2 x double>
+; CHECK: __coshd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1317,7 +1317,7 @@ for.end:
 
 define void @cosh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @cosh_f32(
-; CHECK: __coshf4_massv{{.*}}<4 x float>
+; CHECK: __coshf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1340,7 +1340,7 @@ for.end:
 
 define void @tanh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @tanh_f64(
-; CHECK: __tanhd2_massv{{.*}}<2 x double>
+; CHECK: __tanhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1363,7 +1363,7 @@ for.end:
 
 define void @tanh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @tanh_f32(
-; CHECK: __tanhf4_massv{{.*}}<4 x float>
+; CHECK: __tanhf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1386,7 +1386,7 @@ for.end:
 
 define void @asinh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @asinh_f64(
-; CHECK: __asinhd2_massv{{.*}}<2 x double>
+; CHECK: __asinhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1409,7 +1409,7 @@ for.end:
 
 define void @asinh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @asinh_f32(
-; CHECK: __asinhf4_massv{{.*}}<4 x float>
+; CHECK: __asinhf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1432,7 +1432,7 @@ for.end:
 
 define void @acosh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @acosh_f64(
-; CHECK: __acoshd2_massv{{.*}}<2 x double>
+; CHECK: __acoshd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1455,7 +1455,7 @@ for.end:
 
 define void @acosh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @acosh_f32(
-; CHECK: __acoshf4_massv{{.*}}<4 x float>
+; CHECK: __acoshf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:
@@ -1478,7 +1478,7 @@ for.end:
 
 define void @atanh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f64(
-; CHECK: __atanhd2_massv{{.*}}<2 x double>
+; CHECK: __atanhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -1501,7 +1501,7 @@ for.end:
 
 define void @atanh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f32(
-; CHECK: __atanhf4_massv{{.*}}<4 x float>
+; CHECK: __atanhf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:

diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-nobuiltin.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-nobuiltin.ll
index 872326934ab4..76daeb22e1da 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-nobuiltin.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-nobuiltin.ll
@@ -9,7 +9,7 @@ declare float @atanhf(float) #1
 ; Check that functions marked as nobuiltin are not lowered to massv entries.
 define void @atanh_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f64(
-; CHECK-NOT: __atanhd2_massv{{.*}}<2 x double>
+; CHECK-NOT: __atanhd2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -32,7 +32,7 @@ for.end:
 
 define void @atanh_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @atanh_f32(
-; CHECK-NOT: __atanhf4_massv{{.*}}<2 x double>
+; CHECK-NOT: __atanhf4_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:

diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-unsupported.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-unsupported.ll
index 575f9b2700ad..39885ac1b16c 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/massv-unsupported.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/massv-unsupported.ll
@@ -13,6 +13,7 @@ declare float @llvm.sqrt.f32(float) #0
 define void @ceil_f64(double* nocapture %varray) {
 ; CHECK-LABEL: @ceil_f64(
 ; CHECK-NOT: __ceild2_massv{{.*}}<2 x double>
+; CHECK-NOT: __ceild2_P8{{.*}}<2 x double>
 ; CHECK: ret void
 ;
 entry:
@@ -37,6 +38,7 @@ for.end:
 define void @fabs_f32(float* nocapture %varray) {
 ; CHECK-LABEL: @fabs_f32(
 ; CHECK-NOT: __fabsf4_massv{{.*}}<4 x float>
+; CHECK-NOT: __fabsf4_P8{{.*}}<4 x float>
 ; CHECK: ret void
 ;
 entry:

diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
index b2b70f33378a..0c0fbf98bd88 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
@@ -15,7 +15,7 @@ define dso_local double @test(float* %Arr) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[TMP1]] to <2 x float>*
 ; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
-; CHECK-NEXT:    [[TMP4:%.*]] = call fast <2 x double> @__sind2_massv(<2 x double> [[TMP3]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast <2 x double> @__sind2_P8(<2 x double> [[TMP3]])
 ; CHECK-NEXT:    [[TMP5]] = fadd fast <2 x double> [[TMP4]], [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 2
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128

diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
index d1323d776ac0..4a1c2fa62b68 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-vfabi-attr.ll
@@ -14,7 +14,7 @@ define dso_local double @test(float* %Arr) {
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[TMP1]] to <2 x float>*
 ; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP2]], align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
-; CHECK-NEXT:    [[TMP4:%.*]] = call fast <2 x double> @__sind2_massv(<2 x double> [[TMP3]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast <2 x double> @__sind2_P8(<2 x double> [[TMP3]])
 ; CHECK-NEXT:    [[TMP5]] = fadd fast <2 x double> [[VEC_PHI]], [[TMP4]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 2
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128
@@ -53,6 +53,6 @@ for.end:
 }
 
 declare double @llvm.sin.f64(double) #0
-declare <2 x double> @__sind2_massv(<2 x double>) #0
+declare <2 x double> @__sind2_P8(<2 x double>) #0
 attributes #0 = { nounwind readnone speculatable willreturn }
-attributes #1 = { "vector-function-abi-variant"="_ZGV_LLVM_N2v_llvm.sin.f64(__sind2_massv)" }
+attributes #1 = { "vector-function-abi-variant"="_ZGV_LLVM_N2v_llvm.sin.f64(__sind2_P8)" }

diff --git a/llvm/test/Transforms/Util/add-TLI-mappings.ll b/llvm/test/Transforms/Util/add-TLI-mappings.ll
index 49fb3aa1736f..bfb04201d9eb 100644
--- a/llvm/test/Transforms/Util/add-TLI-mappings.ll
+++ b/llvm/test/Transforms/Util/add-TLI-mappings.ll
@@ -19,8 +19,8 @@ target triple = "x86_64-unknown-linux-gnu"
 ; SVML-SAME:          i8* bitcast (<8 x float> (<8 x float>)* @__svml_log10f8 to i8*),
 ; SVML-SAME:          i8* bitcast (<16 x float> (<16 x float>)* @__svml_log10f16 to i8*)
 ; MASSV-SAME:       [2 x i8*] [
-; MASSV-SAME:         i8* bitcast (<2 x double> (<2 x double>)* @__sind2_massv to i8*),
-; MASSV-SAME:         i8* bitcast (<4 x float> (<4 x float>)* @__log10f4_massv to i8*)
+; MASSV-SAME:         i8* bitcast (<2 x double> (<2 x double>)* @__sind2_P8 to i8*),
+; MASSV-SAME:         i8* bitcast (<4 x float> (<4 x float>)* @__log10f4_P8 to i8*)
 ; ACCELERATE-SAME:  [1 x i8*] [
 ; ACCELERATE-SAME:    i8* bitcast (<4 x float> (<4 x float>)* @vlog10f to i8*)
 ; LIBMVEC-X86-SAME: [2 x i8*] [
@@ -64,9 +64,9 @@ attributes #0 = { nounwind readnone }
 ; SVML-SAME:   _ZGV_LLVM_N8v_sin(__svml_sin8)" }
 
 ; MASSV:      attributes #[[SIN]] = { "vector-function-abi-variant"=
-; MASSV-SAME:   "_ZGV_LLVM_N2v_sin(__sind2_massv)" }
+; MASSV-SAME:   "_ZGV_LLVM_N2v_sin(__sind2_P8)" }
 ; MASSV:      attributes #[[LOG10]] = { "vector-function-abi-variant"=
-; MASSV-SAME:   "_ZGV_LLVM_N4v_llvm.log10.f32(__log10f4_massv)" }
+; MASSV-SAME:   "_ZGV_LLVM_N4v_llvm.log10.f32(__log10f4_P8)" }
 
 ; ACCELERATE:      attributes #[[LOG10]] = { "vector-function-abi-variant"=
 ; ACCELERATE-SAME:   "_ZGV_LLVM_N4v_llvm.log10.f32(vlog10f)" }

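For readers skimming the hunks above, here is an illustrative sketch (not part of the commit) of what the renamed entry points look like in vectorized IR. The scalar call, the placeholder value names %conv and %wide.vec, and the VF=2 setup are assumptions for illustration; the @llvm.sin.f64 intrinsic, the fast flag, the <2 x double> call shape, and the @__sind2_P8 name are taken from the test diffs above:

    ; scalar loop body, before vectorization
    %call = call fast double @llvm.sin.f64(double %conv)

    ; after the loop vectorizer runs with -vector-library=MASSV (VF=2),
    ; the TLI mapping now resolves to the P8-suffixed MASS entry point
    ; instead of the old __sind2_massv placeholder
    %vcall = call fast <2 x double> @__sind2_P8(<2 x double> %wide.vec)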