[llvm] 66d1899 - Fix errors in use of strictfp attribute.

Kevin P. Neal via llvm-commits llvm-commits at lists.llvm.org
Fri May 29 09:25:33 PDT 2020


Author: Kevin P. Neal
Date: 2020-05-29T12:25:13-04:00
New Revision: 66d1899e2ffd2581f774ecf51ced4a325e7d004b

URL: https://github.com/llvm/llvm-project/commit/66d1899e2ffd2581f774ecf51ced4a325e7d004b
DIFF: https://github.com/llvm/llvm-project/commit/66d1899e2ffd2581f774ecf51ced4a325e7d004b.diff

LOG: Fix errors in use of strictfp attribute.

Errors spotted with use of: https://reviews.llvm.org/D68233

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
    llvm/test/CodeGen/PowerPC/fp-strict-minmax.ll
    llvm/test/CodeGen/PowerPC/fp-strict.ll
    llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll b/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
index 2f92382f4709..21ddb799141d 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
@@ -9,7 +9,7 @@ declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, m
 declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
 
-define fp128 @fadd_f128(fp128 %f1, fp128 %f2) {
+define fp128 @fadd_f128(fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fadd_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsaddqp v2, v2, v3
@@ -17,11 +17,11 @@ define fp128 @fadd_f128(fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fadd.f128(
                         fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fsub_f128(fp128 %f1, fp128 %f2) {
+define fp128 @fsub_f128(fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fsub_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssubqp v2, v2, v3
@@ -29,11 +29,11 @@ define fp128 @fsub_f128(fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fsub.f128(
                         fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fmul_f128(fp128 %f1, fp128 %f2) {
+define fp128 @fmul_f128(fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fmul_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmulqp v2, v2, v3
@@ -41,11 +41,11 @@ define fp128 @fmul_f128(fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                         fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fdiv_f128(fp128 %f1, fp128 %f2) {
+define fp128 @fdiv_f128(fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fdiv_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsdivqp v2, v2, v3
@@ -53,11 +53,11 @@ define fp128 @fdiv_f128(fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fdiv.f128(
                         fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
+define fp128 @fmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fmadd_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmaddqp v4, v2, v3
@@ -66,11 +66,11 @@ define fp128 @fmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fma.f128(
                         fp128 %f0, fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
+define fp128 @fmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fmsub_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmsubqp v4, v2, v3
@@ -80,11 +80,11 @@ define fp128 @fmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
   %res = call fp128 @llvm.experimental.constrained.fma.f128(
                         fp128 %f0, fp128 %f1, fp128 %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
 
-define fp128 @fnmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
+define fp128 @fnmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fnmadd_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmaddqp v4, v2, v3
@@ -93,12 +93,12 @@ define fp128 @fnmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
   %fma = call fp128 @llvm.experimental.constrained.fma.f128(
                         fp128 %f0, fp128 %f1, fp128 %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg fp128 %fma
   ret fp128 %res
 }
 
-define fp128 @fnmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
+define fp128 @fnmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
 ; CHECK-LABEL: fnmsub_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmsubqp v4, v2, v3
@@ -108,13 +108,13 @@ define fp128 @fnmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) {
   %fma = call fp128 @llvm.experimental.constrained.fma.f128(
                         fp128 %f0, fp128 %f1, fp128 %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg fp128 %fma
   ret fp128 %res
 }
 
 
-define fp128 @fsqrt_f128(fp128 %f1) {
+define fp128 @fsqrt_f128(fp128 %f1) #0 {
 ; CHECK-LABEL: fsqrt_f128:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssqrtqp v2, v2
@@ -122,6 +122,8 @@ define fp128 @fsqrt_f128(fp128 %f1) {
   %res = call fp128 @llvm.experimental.constrained.sqrt.f128(
                         fp128 %f1,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret fp128 %res
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-minmax.ll b/llvm/test/CodeGen/PowerPC/fp-strict-minmax.ll
index 14e8be5d7d39..7663708e6b4e 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-minmax.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-minmax.ll
@@ -9,47 +9,49 @@ declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <
 declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata)
 
-define <4 x float> @fmaxnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) {
+define <4 x float> @fmaxnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) #0 {
 ; CHECK-LABEL: fmaxnum_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmaxsp v2, v2, v3
 ; CHECK-NEXT:    blr
   %res = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1,
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fmaxnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) {
+define <2 x double> @fmaxnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) #0 {
 ; CHECK-LABEL: fmaxnum_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmaxdp v2, v2, v3
 ; CHECK-NEXT:    blr
   %res = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1,
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
 
-define <4 x float> @fminnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) {
+define <4 x float> @fminnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) #0 {
 ; CHECK-LABEL: fminnum_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvminsp v2, v2, v3
 ; CHECK-NEXT:    blr
   %res = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1,
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fminnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) {
+define <2 x double> @fminnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) #0 {
 ; CHECK-LABEL: fminnum_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmindp v2, v2, v3
 ; CHECK-NEXT:    blr
   %res = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1,
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/PowerPC/fp-strict.ll b/llvm/test/CodeGen/PowerPC/fp-strict.ll
index 743f68029be9..04e6f967a2b8 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict.ll
@@ -33,7 +33,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
 declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
 
-define float @fadd_f32(float %f1, float %f2) {
+define float @fadd_f32(float %f1, float %f2) #0 {
 ; CHECK-LABEL: fadd_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsaddsp f1, f1, f2
@@ -46,11 +46,11 @@ define float @fadd_f32(float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fadd.f32(
                         float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res
 }
 
-define double @fadd_f64(double %f1, double %f2) {
+define double @fadd_f64(double %f1, double %f2) #0 {
 ; CHECK-LABEL: fadd_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsadddp f1, f1, f2
@@ -63,11 +63,11 @@ define double @fadd_f64(double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fadd.f64(
                         double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res
 }
 
-define <4 x float> @fadd_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fadd_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fadd_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvaddsp v2, v2, v3
@@ -101,11 +101,11 @@ define <4 x float> @fadd_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
   %res = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(
                         <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fadd_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fadd_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fadd_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvadddp v2, v2, v3
@@ -119,11 +119,11 @@ define <2 x double> @fadd_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
   %res = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
                         <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
-define float @fsub_f32(float %f1, float %f2) {
+define float @fsub_f32(float %f1, float %f2) #0 {
 ; CHECK-LABEL: fsub_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssubsp f1, f1, f2
@@ -137,11 +137,11 @@ define float @fsub_f32(float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fsub.f32(
                         float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res;
 }
 
-define double @fsub_f64(double %f1, double %f2) {
+define double @fsub_f64(double %f1, double %f2) #0 {
 ; CHECK-LABEL: fsub_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssubdp f1, f1, f2
@@ -155,11 +155,11 @@ define double @fsub_f64(double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fsub.f64(
                         double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res;
 }
 
-define <4 x float> @fsub_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fsub_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fsub_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsubsp v2, v2, v3
@@ -193,11 +193,11 @@ define <4 x float> @fsub_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
   %res = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(
                         <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res;
 }
 
-define <2 x double> @fsub_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fsub_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fsub_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsubdp v2, v2, v3
@@ -211,11 +211,11 @@ define <2 x double> @fsub_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
   %res = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
                         <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res;
 }
 
-define float @fmul_f32(float %f1, float %f2) {
+define float @fmul_f32(float %f1, float %f2) #0 {
 ; CHECK-LABEL: fmul_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmulsp f1, f1, f2
@@ -229,11 +229,11 @@ define float @fmul_f32(float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fmul.f32(
                         float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res;
 }
 
-define double @fmul_f64(double %f1, double %f2) {
+define double @fmul_f64(double %f1, double %f2) #0 {
 ; CHECK-LABEL: fmul_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmuldp f1, f1, f2
@@ -247,11 +247,11 @@ define double @fmul_f64(double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fmul.f64(
                         double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res;
 }
 
-define <4 x float> @fmul_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fmul_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fmul_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmulsp v2, v2, v3
@@ -285,11 +285,11 @@ define <4 x float> @fmul_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
   %res = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(
                         <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res;
 }
 
-define <2 x double> @fmul_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fmul_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fmul_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmuldp v2, v2, v3
@@ -303,11 +303,11 @@ define <2 x double> @fmul_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
   %res = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
                         <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res;
 }
 
-define float @fdiv_f32(float %f1, float %f2) {
+define float @fdiv_f32(float %f1, float %f2) #0 {
 ; CHECK-LABEL: fdiv_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsdivsp f1, f1, f2
@@ -321,11 +321,11 @@ define float @fdiv_f32(float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fdiv.f32(
                         float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res;
 }
 
-define double @fdiv_f64(double %f1, double %f2) {
+define double @fdiv_f64(double %f1, double %f2) #0 {
 ; CHECK-LABEL: fdiv_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsdivdp f1, f1, f2
@@ -339,11 +339,11 @@ define double @fdiv_f64(double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fdiv.f64(
                         double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res;
 }
 
-define <4 x float> @fdiv_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fdiv_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fdiv_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvdivsp v2, v2, v3
@@ -377,11 +377,11 @@ define <4 x float> @fdiv_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
   %res = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(
                         <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fdiv_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fdiv_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fdiv_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvdivdp v2, v2, v3
@@ -395,11 +395,11 @@ define <2 x double> @fdiv_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
   %res = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
                         <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
-define double @no_fma_fold(double %f1, double %f2, double %f3) {
+define double @no_fma_fold(double %f1, double %f2, double %f3) #0 {
 ; CHECK-LABEL: no_fma_fold:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmuldp f0, f1, f2
@@ -414,15 +414,15 @@ define double @no_fma_fold(double %f1, double %f2, double %f3) {
   %mul = call double @llvm.experimental.constrained.fmul.f64(
                         double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %add = call double @llvm.experimental.constrained.fadd.f64(
                         double %mul, double %f3,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %add
 }
 
-define float @fmadd_f32(float %f0, float %f1, float %f2) {
+define float @fmadd_f32(float %f0, float %f1, float %f2) #0 {
 ; CHECK-LABEL: fmadd_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmaddasp f3, f1, f2
@@ -436,11 +436,11 @@ define float @fmadd_f32(float %f0, float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fma.f32(
                         float %f0, float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res
 }
 
-define double @fmadd_f64(double %f0, double %f1, double %f2) {
+define double @fmadd_f64(double %f0, double %f1, double %f2) #0 {
 ; CHECK-LABEL: fmadd_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmaddadp f3, f1, f2
@@ -454,11 +454,11 @@ define double @fmadd_f64(double %f0, double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fma.f64(
                         double %f0, double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res
 }
 
-define <4 x float> @fmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fmadd_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmaddasp v4, v2, v3
@@ -499,11 +499,11 @@ define <4 x float> @fmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float>
   %res = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fmadd_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmaddadp v4, v2, v3
@@ -518,11 +518,11 @@ define <2 x double> @fmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x doub
   %res = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
-define float @fmsub_f32(float %f0, float %f1, float %f2) {
+define float @fmsub_f32(float %f0, float %f1, float %f2) #0 {
 ; CHECK-LABEL: fmsub_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmsubasp f3, f1, f2
@@ -537,11 +537,11 @@ define float @fmsub_f32(float %f0, float %f1, float %f2) {
   %res = call float @llvm.experimental.constrained.fma.f32(
                         float %f0, float %f1, float %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res
 }
 
-define double @fmsub_f64(double %f0, double %f1, double %f2) {
+define double @fmsub_f64(double %f0, double %f1, double %f2) #0 {
 ; CHECK-LABEL: fmsub_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsmsubadp f3, f1, f2
@@ -556,11 +556,11 @@ define double @fmsub_f64(double %f0, double %f1, double %f2) {
   %res = call double @llvm.experimental.constrained.fma.f64(
                         double %f0, double %f1, double %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res
 }
 
-define <4 x float> @fmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fmsub_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmsubasp v4, v2, v3
@@ -605,11 +605,11 @@ define <4 x float> @fmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float>
   %res = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1, <4 x float> %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fmsub_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmsubadp v4, v2, v3
@@ -625,11 +625,11 @@ define <2 x double> @fmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x doub
   %res = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1, <2 x double> %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
-define float @fnmadd_f32(float %f0, float %f1, float %f2) {
+define float @fnmadd_f32(float %f0, float %f1, float %f2) #0 {
 ; CHECK-LABEL: fnmadd_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmaddasp f3, f1, f2
@@ -643,12 +643,12 @@ define float @fnmadd_f32(float %f0, float %f1, float %f2) {
   %fma = call float @llvm.experimental.constrained.fma.f32(
                         float %f0, float %f1, float %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg float %fma
   ret float %res
 }
 
-define double @fnmadd_f64(double %f0, double %f1, double %f2) {
+define double @fnmadd_f64(double %f0, double %f1, double %f2) #0 {
 ; CHECK-LABEL: fnmadd_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmaddadp f3, f1, f2
@@ -662,12 +662,12 @@ define double @fnmadd_f64(double %f0, double %f1, double %f2) {
   %fma = call double @llvm.experimental.constrained.fma.f64(
                         double %f0, double %f1, double %f2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg double %fma
   ret double %res
 }
 
-define <4 x float> @fnmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fnmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fnmadd_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvmaddasp v4, v2, v3
@@ -711,12 +711,12 @@ define <4 x float> @fnmadd_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float>
   %fma = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg <4 x float> %fma
   ret <4 x float> %res
 }
 
-define <2 x double> @fnmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fnmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fnmadd_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvnmaddadp v4, v2, v3
@@ -731,12 +731,12 @@ define <2 x double> @fnmadd_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x dou
   %fma = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg <2 x double> %fma
   ret <2 x double> %res
 }
 
-define float @fnmsub_f32(float %f0, float %f1, float %f2) {
+define float @fnmsub_f32(float %f0, float %f1, float %f2) #0 {
 ; CHECK-LABEL: fnmsub_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmsubasp f3, f1, f2
@@ -751,12 +751,12 @@ define float @fnmsub_f32(float %f0, float %f1, float %f2) {
   %fma = call float @llvm.experimental.constrained.fma.f32(
                         float %f0, float %f1, float %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg float %fma
   ret float %res
 }
 
-define double @fnmsub_f64(double %f0, double %f1, double %f2) {
+define double @fnmsub_f64(double %f0, double %f1, double %f2) #0 {
 ; CHECK-LABEL: fnmsub_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xsnmsubadp f3, f1, f2
@@ -771,12 +771,12 @@ define double @fnmsub_f64(double %f0, double %f1, double %f2) {
   %fma = call double @llvm.experimental.constrained.fma.f64(
                         double %f0, double %f1, double %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg double %fma
   ret double %res
 }
 
-define <4 x float> @fnmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @fnmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float> %vf2) #0 {
 ; CHECK-LABEL: fnmsub_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvnmsubasp v4, v2, v3
@@ -822,12 +822,12 @@ define <4 x float> @fnmsub_v4f32(<4 x float> %vf0, <4 x float> %vf1, <4 x float>
   %fma = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
                         <4 x float> %vf0, <4 x float> %vf1, <4 x float> %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg <4 x float> %fma
   ret <4 x float> %res
 }
 
-define <2 x double> @fnmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @fnmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x double> %vf2) #0 {
 ; CHECK-LABEL: fnmsub_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvnmsubadp v4, v2, v3
@@ -843,12 +843,12 @@ define <2 x double> @fnmsub_v2f64(<2 x double> %vf0, <2 x double> %vf1, <2 x dou
   %fma = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
                         <2 x double> %vf0, <2 x double> %vf1, <2 x double> %neg,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   %res = fneg <2 x double> %fma
   ret <2 x double> %res
 }
 
-define float @fsqrt_f32(float %f1) {
+define float @fsqrt_f32(float %f1) #0 {
 ; CHECK-LABEL: fsqrt_f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssqrtsp f1, f1
@@ -861,11 +861,11 @@ define float @fsqrt_f32(float %f1) {
   %res = call float @llvm.experimental.constrained.sqrt.f32(
                         float %f1,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret float %res
 }
 
-define double @fsqrt_f64(double %f1) {
+define double @fsqrt_f64(double %f1) #0 {
 ; CHECK-LABEL: fsqrt_f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xssqrtdp f1, f1
@@ -878,11 +878,11 @@ define double @fsqrt_f64(double %f1) {
   %res = call double @llvm.experimental.constrained.sqrt.f64(
                         double %f1,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret double %res
 }
 
-define <4 x float> @fsqrt_v4f32(<4 x float> %vf1) {
+define <4 x float> @fsqrt_v4f32(<4 x float> %vf1) #0 {
 ; CHECK-LABEL: fsqrt_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsqrtsp v2, v2
@@ -910,11 +910,11 @@ define <4 x float> @fsqrt_v4f32(<4 x float> %vf1) {
   %res = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(
                         <4 x float> %vf1,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
-define <2 x double> @fsqrt_v2f64(<2 x double> %vf1) {
+define <2 x double> @fsqrt_v2f64(<2 x double> %vf1) #0 {
 ; CHECK-LABEL: fsqrt_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvsqrtdp v2, v2
@@ -928,6 +928,8 @@ define <2 x double> @fsqrt_v2f64(<2 x double> %vf1) {
   %res = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
                         <2 x double> %vf1,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict")
+                        metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index 8284607482a3..c9d9cf870e49 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -O3 -mtriple=powerpc64le-linux-gnu -mcpu=pwr9 < %s | FileCheck --check-prefix=PC64LE9 %s
 ; RUN: llc -O3 -mtriple=powerpc64-linux-gnu < %s | FileCheck --check-prefix=PC64 %s
 
-define ppc_fp128 @test_fadd_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_fadd_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_fadd_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -44,11 +44,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %add
 }
 
-define ppc_fp128 @test_fsub_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_fsub_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_fsub_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -89,11 +89,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %sub
 }
 
-define ppc_fp128 @test_fmul_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_fmul_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_fmul_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -134,11 +134,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %mul
 }
 
-define ppc_fp128 @test_fdiv_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_fdiv_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_fdiv_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -179,11 +179,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %div
 }
 
-define ppc_fp128 @test_frem_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_frem_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_frem_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -224,11 +224,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %rem
 }
 
-define ppc_fp128 @test_fma_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second, ppc_fp128 %third) nounwind {
+define ppc_fp128 @test_fma_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second, ppc_fp128 %third) #0 {
 ; PC64LE-LABEL: test_fma_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -270,11 +270,11 @@ entry:
                     ppc_fp128 %second,
                     ppc_fp128 %third,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %add
 }
 
-define ppc_fp128 @test_sqrt_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_sqrt_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_sqrt_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -314,11 +314,11 @@ entry:
   %sqrt = call ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %sqrt
 }
 
-define ppc_fp128 @test_pow_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_pow_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_pow_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -359,11 +359,11 @@ entry:
                     ppc_fp128 %first,
                     ppc_fp128 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %pow
 }
 
-define ppc_fp128 @test_powi_ppc_fp128(ppc_fp128 %first, i32 %second) nounwind {
+define ppc_fp128 @test_powi_ppc_fp128(ppc_fp128 %first, i32 %second) #0 {
 ; PC64LE-LABEL: test_powi_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -407,11 +407,11 @@ entry:
                     ppc_fp128 %first,
                     i32 %second,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %powi
 }
 
-define ppc_fp128 @test_sin_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_sin_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_sin_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -451,11 +451,11 @@ entry:
   %sin = call ppc_fp128 @llvm.experimental.constrained.sin.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %sin
 }
 
-define ppc_fp128 @test_cos_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_cos_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_cos_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -495,11 +495,11 @@ entry:
   %cos = call ppc_fp128 @llvm.experimental.constrained.cos.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %cos
 }
 
-define ppc_fp128 @test_exp_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_exp_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_exp_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -539,11 +539,11 @@ entry:
   %exp = call ppc_fp128 @llvm.experimental.constrained.exp.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %exp
 }
 
-define ppc_fp128 @test_exp2_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_exp2_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_exp2_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -583,11 +583,11 @@ entry:
   %exp2 = call ppc_fp128 @llvm.experimental.constrained.exp2.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %exp2
 }
 
-define ppc_fp128 @test_log_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_log_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_log_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -627,11 +627,11 @@ entry:
   %log = call ppc_fp128 @llvm.experimental.constrained.log.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %log
 }
 
-define ppc_fp128 @test_log2_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_log2_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_log2_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -671,11 +671,11 @@ entry:
   %log2 = call ppc_fp128 @llvm.experimental.constrained.log2.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %log2
 }
 
-define ppc_fp128 @test_log10_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_log10_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_log10_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -715,11 +715,11 @@ entry:
   %log10 = call ppc_fp128 @llvm.experimental.constrained.log10.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %log10
 }
 
-define ppc_fp128 @test_rint_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_rint_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_rint_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -759,11 +759,11 @@ entry:
   %rint = call ppc_fp128 @llvm.experimental.constrained.rint.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %rint
 }
 
-define ppc_fp128 @test_nearbyint_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_nearbyint_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_nearbyint_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -803,11 +803,11 @@ entry:
   %nearbyint = call ppc_fp128 @llvm.experimental.constrained.nearbyint.ppcf128(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %nearbyint
 }
 
-define ppc_fp128 @test_maxnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_maxnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_maxnum_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -847,11 +847,11 @@ entry:
   %maxnum = call ppc_fp128 @llvm.experimental.constrained.maxnum.ppcf128(
                     ppc_fp128 %first,
                     ppc_fp128 %second,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %maxnum
 }
 
-define ppc_fp128 @test_minnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind {
+define ppc_fp128 @test_minnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) #0 {
 ; PC64LE-LABEL: test_minnum_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -891,11 +891,11 @@ entry:
   %minnum = call ppc_fp128 @llvm.experimental.constrained.minnum.ppcf128(
                     ppc_fp128 %first,
                     ppc_fp128 %second,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %minnum
 }
 
-define ppc_fp128 @test_ceil_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_ceil_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_ceil_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -934,11 +934,11 @@ define ppc_fp128 @test_ceil_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %ceil = call ppc_fp128 @llvm.experimental.constrained.ceil.ppcf128(
                     ppc_fp128 %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %ceil
 }
 
-define ppc_fp128 @test_floor_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_floor_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_floor_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -977,11 +977,11 @@ define ppc_fp128 @test_floor_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %floor = call ppc_fp128 @llvm.experimental.constrained.floor.ppcf128(
                     ppc_fp128 %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %floor
 }
 
-define ppc_fp128 @test_round_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_round_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_round_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1020,11 +1020,11 @@ define ppc_fp128 @test_round_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %round = call ppc_fp128 @llvm.experimental.constrained.round.ppcf128(
                     ppc_fp128 %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %round
 }
 
-define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) nounwind {
+define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_trunc_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1063,11 +1063,11 @@ define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %trunc = call ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(
                     ppc_fp128 %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %trunc
 }
 
-define float @test_fptrunc_ppc_fp128_f32(ppc_fp128 %first) nounwind {
+define float @test_fptrunc_ppc_fp128_f32(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptrunc_ppc_fp128_f32:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    frsp 1, 1
@@ -1086,11 +1086,11 @@ entry:
   %fptrunc = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret float %fptrunc
 }
 
-define double @test_fptrunc_ppc_fp128_f64(ppc_fp128 %first) nounwind {
+define double @test_fptrunc_ppc_fp128_f64(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptrunc_ppc_fp128_f64:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    blr
@@ -1106,11 +1106,11 @@ entry:
   %fptrunc = call double @llvm.experimental.constrained.fptrunc.ppcf128.f64(
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret double %fptrunc
 }
 
-define ppc_fp128 @test_fpext_ppc_fp128_f32(float %first) nounwind {
+define ppc_fp128 @test_fpext_ppc_fp128_f32(float %first) #0 {
 ; PC64LE-LABEL: test_fpext_ppc_fp128_f32:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    xxlxor 2, 2, 2
@@ -1129,11 +1129,11 @@ define ppc_fp128 @test_fpext_ppc_fp128_f32(float %first) nounwind {
 entry:
   %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
                     float %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %fpext
 }
 
-define ppc_fp128 @test_fpext_ppc_fp128_f64(double %first) nounwind {
+define ppc_fp128 @test_fpext_ppc_fp128_f64(double %first) #0 {
 ; PC64LE-LABEL: test_fpext_ppc_fp128_f64:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    xxlxor 2, 2, 2
@@ -1152,11 +1152,11 @@ define ppc_fp128 @test_fpext_ppc_fp128_f64(double %first) nounwind {
 entry:
   %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f64.ppcf128(
                     double %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret ppc_fp128 %fpext
 }
 
-define i64 @test_fptosi_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind {
+define i64 @test_fptosi_ppc_i64_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptosi_ppc_i64_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1195,11 +1195,11 @@ define i64 @test_fptosi_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %fpext = call i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(
                     ppc_fp128 %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret i64 %fpext
 }
 
-define i32 @test_fptosi_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind {
+define i32 @test_fptosi_ppc_i32_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptosi_ppc_i32_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1238,11 +1238,11 @@ define i32 @test_fptosi_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %fpext = call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(
                     ppc_fp128  %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret i32 %fpext
 }
 
-define i64 @test_fptoui_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind {
+define i64 @test_fptoui_ppc_i64_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptoui_ppc_i64_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1281,11 +1281,11 @@ define i64 @test_fptoui_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %fpext = call i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(
                     ppc_fp128   %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret i64 %fpext
 }
 
-define i32 @test_fptoui_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind {
+define i32 @test_fptoui_ppc_i32_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64LE-LABEL: test_fptoui_ppc_i32_ppc_fp128:
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    mflr 0
@@ -1324,13 +1324,13 @@ define i32 @test_fptoui_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind {
 entry:
   %fpext = call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(
                     ppc_fp128   %first,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   ret i32 %fpext
 }
 
 ; Test that resultant libcalls retain order even when their non-strict FLOP form could be
 ; trivially optimized into differing sequences.
-define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %result) nounwind {
+define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %result) #0 {
 ; PC64LE-LABEL: test_constrained_libcall_multichain:
 ; PC64LE:       # %bb.0:
 ; PC64LE-NEXT:    mflr 0
@@ -1490,7 +1490,7 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
   %load = load float, float* %firstptr
   %first = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
                     float %load,
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   store ppc_fp128 %first, ppc_fp128* %result
 
   ; For unconstrained FLOPs, these next two FP instructions would necessarily
@@ -1499,14 +1499,14 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
                     ppc_fp128 %first,
                     ppc_fp128 %first,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   %stridx1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 1
   store ppc_fp128 %fadd, ppc_fp128* %stridx1
   %fmul = call ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128(
                     ppc_fp128 %fadd,
                     ppc_fp128 %fadd,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   %stridx2 = getelementptr ppc_fp128, ppc_fp128* %stridx1, i32 1
   store ppc_fp128 %fadd, ppc_fp128* %stridx2
 
@@ -1517,17 +1517,20 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
                     ppc_fp128 %first,
                     i32 2,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   %tinypow = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32(
                     ppc_fp128 %powi,
                     metadata !"round.dynamic",
-                    metadata !"fpexcept.strict")
+                    metadata !"fpexcept.strict") #1
   store float %tinypow, float* %firstptr
   %stridxn1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 -1
   store ppc_fp128 %powi, ppc_fp128* %stridxn1
   ret void
 }
 
+attributes #0 = { nounwind strictfp }
+attributes #1 = { strictfp }
+
 declare ppc_fp128 @llvm.experimental.constrained.fadd.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.ceil.ppcf128(ppc_fp128, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.cos.ppcf128(ppc_fp128, metadata, metadata)


        


More information about the llvm-commits mailing list