[llvm] 2866c6c - [NFC] [PowerPC] Narrow fast-math flags in tests

Qiu Chaofan via llvm-commits llvm-commits@lists.llvm.org
Wed May 13 02:23:02 PDT 2020


Author: Qiu Chaofan
Date: 2020-05-13T17:22:45+08:00
New Revision: 2866c6cad475f668bd0bffabb0513dc96ff157be

URL: https://github.com/llvm/llvm-project/commit/2866c6cad475f668bd0bffabb0513dc96ff157be
DIFF: https://github.com/llvm/llvm-project/commit/2866c6cad475f668bd0bffabb0513dc96ff157be.diff

LOG: [NFC] [PowerPC] Narrow fast-math flags in tests

Many tests under PowerPC use the fast flag, which is just an alias for
all seven fast-math flags (nnan, ninf, nsz, arcp, contract, afn,
reassoc). Narrowing each test to the flags it actually depends on makes
the point of each test clearer.
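
For example, the fdiv.ll change below keeps only the two flags that the
division-estimate codegen being tested actually depends on:

    %3 = fdiv fast float %0, %1          ; fast implies nnan ninf nsz arcp contract afn reassoc
    %3 = fdiv reassoc arcp float %0, %1  ; narrowed to reassociation + reciprocal approximation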

mc-instrlat.ll and sms-iterator.ll remain unchanged since they do not
test fast-math behavior (one covers a machine combiner crash, the other
a machine pipeliner bug).

Reviewed By: steven.zhang, spatel

Differential Revision: https://reviews.llvm.org/D78989

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/combine-fneg.ll
    llvm/test/CodeGen/PowerPC/fdiv.ll
    llvm/test/CodeGen/PowerPC/fma-assoc.ll
    llvm/test/CodeGen/PowerPC/fma-combine.ll
    llvm/test/CodeGen/PowerPC/fma-mutate.ll
    llvm/test/CodeGen/PowerPC/fma-negate.ll
    llvm/test/CodeGen/PowerPC/fma-precision.ll
    llvm/test/CodeGen/PowerPC/fmf-propagation.ll
    llvm/test/CodeGen/PowerPC/load-two-flts.ll
    llvm/test/CodeGen/PowerPC/pow.75.ll
    llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
    llvm/test/CodeGen/PowerPC/qpx-recipest.ll
    llvm/test/CodeGen/PowerPC/recipest.ll
    llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll
    llvm/test/CodeGen/PowerPC/scalar-equal.ll
    llvm/test/CodeGen/PowerPC/scalar-min-max.ll
    llvm/test/CodeGen/PowerPC/scalar_cmp.ll
    llvm/test/CodeGen/PowerPC/vec-min-max.ll
    llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
    llvm/test/CodeGen/PowerPC/vsx-recip-est.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/combine-fneg.ll b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
index 14bace2f95f8..11af9835bc38 100644
--- a/llvm/test/CodeGen/PowerPC/combine-fneg.ll
+++ b/llvm/test/CodeGen/PowerPC/combine-fneg.ll
@@ -23,7 +23,7 @@ define <4 x double> @fneg_fdiv_splat(double %a0, <4 x double> %a1) {
 entry:
   %splat.splatinsert = insertelement <4 x double> undef, double %a0, i32 0
   %splat.splat = shufflevector <4 x double> %splat.splatinsert, <4 x double> undef, <4 x i32> zeroinitializer
-  %div = fdiv fast <4 x double> %a1, %splat.splat
-  %sub = fsub fast <4 x double> <double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, %div
+  %div = fdiv reassoc nsz arcp <4 x double> %a1, %splat.splat
+  %sub = fsub reassoc nsz <4 x double> <double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, %div
   ret <4 x double> %sub
 }

diff --git a/llvm/test/CodeGen/PowerPC/fdiv.ll b/llvm/test/CodeGen/PowerPC/fdiv.ll
index 183e514d994a..96b737944c2d 100644
--- a/llvm/test/CodeGen/PowerPC/fdiv.ll
+++ b/llvm/test/CodeGen/PowerPC/fdiv.ll
@@ -10,6 +10,6 @@ define dso_local float @foo(float %0, float %1) local_unnamed_addr {
 ; CHECK-NEXT:    xsmaddasp 0, 3, 1
 ; CHECK-NEXT:    fmr 1, 0
 ; CHECK-NEXT:    blr
-  %3 = fdiv fast float %0, %1
+  %3 = fdiv reassoc arcp float %0, %1
   ret float %3
 }

diff --git a/llvm/test/CodeGen/PowerPC/fma-assoc.ll b/llvm/test/CodeGen/PowerPC/fma-assoc.ll
index a89972918862..b5a839f3485d 100644
--- a/llvm/test/CodeGen/PowerPC/fma-assoc.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-assoc.ll
@@ -364,10 +364,10 @@ define double @test_fast_FMSUB_ASSOC2(double %A, double %B, double %C,
 ; CHECK-VSX-NEXT:    fmr 1, 3
 ; CHECK-VSX-NEXT:    blr
                                  double %D, double %E) {
-  %F = fmul fast double %A, %B         ; <double> [#uses=1]
-  %G = fmul fast double %C, %D         ; <double> [#uses=1]
-  %H = fadd fast double %F, %G         ; <double> [#uses=1]
-  %I = fsub fast double %E, %H         ; <double> [#uses=1]
+  %F = fmul reassoc double %A, %B         ; <double> [#uses=1]
+  %G = fmul reassoc double %C, %D         ; <double> [#uses=1]
+  %H = fadd reassoc double %F, %G         ; <double> [#uses=1]
+  %I = fsub reassoc nsz double %E, %H         ; <double> [#uses=1]
   ret double %I
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/fma-combine.ll b/llvm/test/CodeGen/PowerPC/fma-combine.ll
index 8973dc1334d7..f3fc615b5755 100644
--- a/llvm/test/CodeGen/PowerPC/fma-combine.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-combine.ll
@@ -182,16 +182,16 @@ define float @fma_combine_no_ice() {
 ; CHECK-NEXT:    blr
   %tmp = load float, float* undef, align 4
   %tmp2 = load float, float* undef, align 4
-  %tmp3 = fmul fast float %tmp, 0x3FE372D780000000
-  %tmp4 = fadd fast float %tmp3, 1.000000e+00
-  %tmp5 = fmul fast float %tmp2, %tmp4
+  %tmp3 = fmul reassoc float %tmp, 0x3FE372D780000000
+  %tmp4 = fadd reassoc float %tmp3, 1.000000e+00
+  %tmp5 = fmul reassoc float %tmp2, %tmp4
   %tmp6 = load float, float* undef, align 4
   %tmp7 = load float, float* undef, align 4
-  %tmp8 = fmul fast float %tmp7, 0x3FE372D780000000
-  %tmp9 = fsub fast float -1.000000e+00, %tmp8
-  %tmp10 = fmul fast float %tmp9, %tmp6
-  %tmp11 = fadd fast float %tmp5, 5.000000e-01
-  %tmp12 = fadd fast float %tmp11, %tmp10
+  %tmp8 = fmul reassoc float %tmp7, 0x3FE372D780000000
+  %tmp9 = fsub reassoc nsz float -1.000000e+00, %tmp8
+  %tmp10 = fmul reassoc float %tmp9, %tmp6
+  %tmp11 = fadd reassoc float %tmp5, 5.000000e-01
+  %tmp12 = fadd reassoc float %tmp11, %tmp10
   ret float %tmp12
 }
 
@@ -231,10 +231,10 @@ define double @getNegatedExpression_crash(double %x, double %y) {
 ; CHECK-NEXT:    xsmaddadp 0, 3, 2
 ; CHECK-NEXT:    fmr 1, 0
 ; CHECK-NEXT:    blr
-  %neg = fneg fast double %x
-  %fma = call fast double @llvm.fma.f64(double %neg, double 42.0, double -1.0)
-  %add = fadd fast double %x, 1.0
-  %fma1 = call fast double @llvm.fma.f64(double %fma, double %y, double %add)
+  %neg = fneg reassoc double %x
+  %fma = call reassoc nsz double @llvm.fma.f64(double %neg, double 42.0, double -1.0)
+  %add = fadd reassoc nsz double %x, 1.0
+  %fma1 = call reassoc nsz double @llvm.fma.f64(double %fma, double %y, double %add)
   ret double %fma1
 }
 declare double @llvm.fma.f64(double, double, double) nounwind readnone

diff --git a/llvm/test/CodeGen/PowerPC/fma-mutate.ll b/llvm/test/CodeGen/PowerPC/fma-mutate.ll
index 86fad0af2481..f3dc1c6c8ab4 100644
--- a/llvm/test/CodeGen/PowerPC/fma-mutate.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-mutate.ll
@@ -14,7 +14,7 @@ define double @foo3_fmf(double %a) nounwind {
 ; CHECK-NOT: fmr
 ; CHECK: xsmaddmdp
 ; CHECK: xsmaddadp
-  %r = call fast double @llvm.sqrt.f64(double %a)
+  %r = call reassoc afn ninf double @llvm.sqrt.f64(double %a)
   ret double %r
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/fma-negate.ll b/llvm/test/CodeGen/PowerPC/fma-negate.ll
index cb260532b494..254934d69170 100644
--- a/llvm/test/CodeGen/PowerPC/fma-negate.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-negate.ll
@@ -167,8 +167,8 @@ define double @test_fast_mul_sub_f64(double %a, double %b, double %c) {
 ; NO-VSX-NEXT:    fnmsub 1, 2, 3, 1
 ; NO-VSX-NEXT:    blr
 entry:
-  %0 = fmul fast double %b, %c
-  %1 = fsub fast double %a, %0
+  %0 = fmul reassoc double %b, %c
+  %1 = fsub reassoc double %a, %0
   ret double %1
 }
 
@@ -187,9 +187,9 @@ define double @test_fast_2mul_sub_f64(double %a, double %b, double %c,
 ; NO-VSX-NEXT:    blr
                                       double %d) {
 entry:
-  %0 = fmul fast double %a, %b
-  %1 = fmul fast double %c, %d
-  %2 = fsub fast double %0, %1
+  %0 = fmul reassoc double %a, %b
+  %1 = fmul reassoc double %c, %d
+  %2 = fsub reassoc double %0, %1
   ret double %2
 }
 
@@ -205,8 +205,8 @@ define double @test_fast_neg_fma_f64(double %a, double %b, double %c) {
 ; NO-VSX-NEXT:    fnmsub 1, 1, 2, 3
 ; NO-VSX-NEXT:    blr
 entry:
-  %0 = fsub fast double -0.0, %a
-  %1 = call fast double @llvm.fma.f64(double %0, double %b, double %c)
+  %0 = fsub reassoc double -0.0, %a
+  %1 = call reassoc double @llvm.fma.f64(double %0, double %b, double %c)
   ret double %1
 }
 
@@ -221,8 +221,8 @@ define float @test_fast_mul_sub_f32(float %a, float %b, float %c) {
 ; NO-VSX-NEXT:    fnmsubs 1, 2, 3, 1
 ; NO-VSX-NEXT:    blr
 entry:
-  %0 = fmul fast float %b, %c
-  %1 = fsub fast float %a, %0
+  %0 = fmul reassoc float %b, %c
+  %1 = fsub reassoc float %a, %0
   ret float %1
 }
 
@@ -240,9 +240,9 @@ define float @test_fast_2mul_sub_f32(float %a, float %b, float %c, float %d) {
 ; NO-VSX-NEXT:    fmsubs 1, 1, 2, 0
 ; NO-VSX-NEXT:    blr
 entry:
-  %0 = fmul fast float %a, %b
-  %1 = fmul fast float %c, %d
-  %2 = fsub fast float %0, %1
+  %0 = fmul reassoc float %a, %b
+  %1 = fmul reassoc float %c, %d
+  %2 = fsub reassoc float %0, %1
   ret float %2
 }
 
@@ -258,8 +258,8 @@ define float @test_fast_neg_fma_f32(float %a, float %b, float %c) {
 ; NO-VSX-NEXT:    fnmsubs 1, 1, 2, 3
 ; NO-VSX-NEXT:    blr
 entry:
-  %0 = fsub fast float -0.0, %a
-  %1 = call fast float @llvm.fma.f32(float %0, float %b, float %c)
+  %0 = fsub reassoc float -0.0, %a
+  %1 = call reassoc float @llvm.fma.f32(float %0, float %b, float %c)
   ret float %1
 }
 
@@ -277,8 +277,8 @@ define <2 x double> @test_fast_neg_fma_v2f64(<2 x double> %a, <2 x double> %b,
 ; NO-VSX-NEXT:    blr
                                              <2 x double> %c) {
 entry:
-  %0 = fsub fast <2 x double> <double -0.0, double -0.0>, %a
-  %1 = call fast <2 x double> @llvm.fma.v2f64(<2 x double> %0, <2 x double> %b,
+  %0 = fsub reassoc <2 x double> <double -0.0, double -0.0>, %a
+  %1 = call reassoc <2 x double> @llvm.fma.v2f64(<2 x double> %0, <2 x double> %b,
                                               <2 x double> %c)
   ret <2 x double> %1
 }
@@ -299,10 +299,10 @@ define <4 x float> @test_fast_neg_fma_v4f32(<4 x float> %a, <4 x float> %b,
 ; NO-VSX-NEXT:    blr
                                             <4 x float> %c) {
 entry:
-  %0 = fsub fast <4 x float> <float -0.0, float -0.0, float -0.0,
-                              float -0.0>, %a
-  %1 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %b,
-                                             <4 x float> %c)
+  %0 = fsub reassoc <4 x float> <float -0.0, float -0.0, float -0.0,
+                                          float -0.0>, %a
+  %1 = call reassoc <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %b,
+                                                         <4 x float> %c)
   ret <4 x float> %1
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/fma-precision.ll b/llvm/test/CodeGen/PowerPC/fma-precision.ll
index 89b5e097d8a6..8455e67e4fc5 100644
--- a/llvm/test/CodeGen/PowerPC/fma-precision.ll
+++ b/llvm/test/CodeGen/PowerPC/fma-precision.ll
@@ -11,10 +11,10 @@ define double @fsub1(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    xsmuldp 1, 0, 1
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %sub = fsub fast double %mul, %mul1
-  %mul3 = fmul fast double %mul, %sub
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %sub = fsub reassoc double %mul, %mul1
+  %mul3 = fmul reassoc double %mul, %sub
   ret double %mul3
 }
 
@@ -28,10 +28,10 @@ define double @fsub2(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    xsmuldp 1, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %sub = fsub fast double %mul, %mul1
-  %mul3 = fmul fast double %mul1, %sub
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %sub = fsub reassoc double %mul, %mul1
+  %mul3 = fmul reassoc double %mul1, %sub
   ret double %mul3
 }
 
@@ -44,9 +44,9 @@ define double @fsub3(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    fmr 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %sub = fsub fast double %mul, %mul1
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %sub = fsub reassoc double %mul, %mul1
   ret double %sub
 }
 
@@ -60,10 +60,10 @@ define double @fadd1(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    xsmuldp 1, 0, 1
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %add = fadd fast double %mul1, %mul
-  %mul3 = fmul fast double %mul, %add
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %add = fadd reassoc double %mul1, %mul
+  %mul3 = fmul reassoc double %mul, %add
   ret double %mul3
 }
 
@@ -77,10 +77,10 @@ define double @fadd2(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    xsmuldp 1, 0, 3
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %add = fadd fast double %mul1, %mul
-  %mul3 = fmul fast double %mul1, %add
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %add = fadd reassoc double %mul1, %mul
+  %mul3 = fmul reassoc double %mul1, %add
   ret double %mul3
 }
 
@@ -92,9 +92,9 @@ define double @fadd3(double %a, double %b, double %c, double %d)  {
 ; CHECK-NEXT:    xsmaddadp 1, 4, 3
 ; CHECK-NEXT:    blr
 entry:
-  %mul = fmul fast double %b, %a
-  %mul1 = fmul fast double %d, %c
-  %add = fadd fast double %mul1, %mul
+  %mul = fmul reassoc double %b, %a
+  %mul1 = fmul reassoc double %d, %c
+  %add = fadd reassoc double %mul1, %mul
   ret double %add
 }
 
@@ -108,12 +108,12 @@ define double @fma_multi_uses1(double %a, double %b, double %c, double %d, doubl
 ; CHECK-NEXT:    xsnmsubadp 1, 3, 4
 ; CHECK-NEXT:    stfd 0, 0(9)
 ; CHECK-NEXT:    blr
-  %ab = fmul fast double %a, %b
-  %cd = fmul fast double %c, %d
+  %ab = fmul reassoc double %a, %b
+  %cd = fmul reassoc double %c, %d
   store double %ab, double* %p1 ; extra use of %ab
   store double %ab, double* %p2 ; another extra use of %ab
   store double %cd, double* %p3 ; extra use of %cd
-  %r = fsub fast double %ab, %cd
+  %r = fsub reassoc double %ab, %cd
   ret double %r
 }
 
@@ -128,12 +128,12 @@ define double @fma_multi_uses2(double %a, double %b, double %c, double %d, doubl
 ; CHECK-NEXT:    xsmsubadp 0, 1, 2
 ; CHECK-NEXT:    fmr 1, 0
 ; CHECK-NEXT:    blr
-  %ab = fmul fast double %a, %b
-  %cd = fmul fast double %c, %d
+  %ab = fmul reassoc double %a, %b
+  %cd = fmul reassoc double %c, %d
   store double %ab, double* %p1 ; extra use of %ab
   store double %cd, double* %p2 ; extra use of %cd
   store double %cd, double* %p3 ; another extra use of %cd
-  %r = fsub fast double %ab, %cd
+  %r = fsub reassoc double %ab, %cd
   ret double %r
 }
 
@@ -150,14 +150,14 @@ define double @fma_multi_uses3(double %a, double %b, double %c, double %d, doubl
 ; CHECK-NEXT:    xsnmsubadp 0, 3, 4
 ; CHECK-NEXT:    xsadddp 1, 0, 1
 ; CHECK-NEXT:    blr
-  %ab = fmul fast double %a, %b
-  %cd = fmul fast double %c, %d
-  %fg = fmul fast double %f, %g
+  %ab = fmul reassoc double %a, %b
+  %cd = fmul reassoc double %c, %d
+  %fg = fmul reassoc double %f, %g
   store double %ab, double* %p1 ; extra use of %ab
   store double %ab, double* %p2 ; another extra use of %ab
   store double %fg, double* %p3 ; extra use of %fg
-  %q = fsub fast double %fg, %cd ; The uses of %cd reduce to 1 after %r is folded. 2 uses of %fg, fold %cd, remove def of %cd
-  %r = fsub fast double %ab, %cd ; Fold %r before %q. 3 uses of %ab, 2 uses of %cd, fold %cd
-  %add = fadd fast double %r, %q
+  %q = fsub reassoc double %fg, %cd ; The uses of %cd reduce to 1 after %r is folded. 2 uses of %fg, fold %cd, remove def of %cd
+  %r = fsub reassoc double %ab, %cd ; Fold %r before %q. 3 uses of %ab, 2 uses of %cd, fold %cd
+  %add = fadd reassoc double %r, %q
   ret double %add
 }

diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
index 351a98c5546c..90ea31b26916 100644
--- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
+++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
@@ -107,7 +107,7 @@ define float @fmul_fadd_reassoc2(float %x, float %y, float %z) {
 ; The fadd is now fully 'fast'. This implies that contraction is allowed.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fadd_fast1:'
-; FMFDEBUG:         fma nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+}}
+; FMFDEBUG:         fma reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fadd_fast1:'
 
 define float @fmul_fadd_fast1(float %x, float %y, float %z) {
@@ -122,15 +122,15 @@ define float @fmul_fadd_fast1(float %x, float %y, float %z) {
 ; GLOBAL-NEXT:    xsmaddasp 3, 1, 2
 ; GLOBAL-NEXT:    fmr 1, 3
 ; GLOBAL-NEXT:    blr
-  %mul = fmul fast float %x, %y
-  %add = fadd fast float %mul, %z
+  %mul = fmul reassoc float %x, %y
+  %add = fadd reassoc float %mul, %z
   ret float %add
 }
 
 ; This shouldn't change anything - the intermediate fmul result is now also flagged.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fadd_fast2:'
-; FMFDEBUG:         fma nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+}}
+; FMFDEBUG:         fma reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fadd_fast2:'
 
 define float @fmul_fadd_fast2(float %x, float %y, float %z) {
@@ -145,8 +145,8 @@ define float @fmul_fadd_fast2(float %x, float %y, float %z) {
 ; GLOBAL-NEXT:    xsmaddasp 3, 1, 2
 ; GLOBAL-NEXT:    fmr 1, 3
 ; GLOBAL-NEXT:    blr
-  %mul = fmul fast float %x, %y
-  %add = fadd fast float %mul, %z
+  %mul = fmul reassoc float %x, %y
+  %add = fadd reassoc float %mul, %z
   ret float %add
 }
 
@@ -212,11 +212,11 @@ define float @fmul_fma_reassoc2(float %x) {
 ; The FMA is now fully 'fast'. This implies that reassociation is allowed.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fma_fast1:'
-; FMFDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; FMFDEBUG:         fmul reassoc {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fma_fast1:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fma_fast1:'
-; GLOBALDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; GLOBALDEBUG:         fmul reassoc {{t[0-9]+}}
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fma_fast1:'
 
 define float @fmul_fma_fast1(float %x) {
@@ -234,18 +234,18 @@ define float @fmul_fma_fast1(float %x) {
 ; GLOBAL-NEXT:    xsmulsp 1, 1, 0
 ; GLOBAL-NEXT:    blr
   %mul = fmul float %x, 42.0
-  %fma = call fast float @llvm.fma.f32(float %x, float 7.0, float %mul)
+  %fma = call reassoc float @llvm.fma.f32(float %x, float 7.0, float %mul)
   ret float %fma
 }
 
 ; This shouldn't change anything - the intermediate fmul result is now also flagged.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fma_fast2:'
-; FMFDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; FMFDEBUG:         fmul reassoc {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fma_fast2:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'fmul_fma_fast2:'
-; GLOBALDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; GLOBALDEBUG:         fmul reassoc {{t[0-9]+}}
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'fmul_fma_fast2:'
 
 define float @fmul_fma_fast2(float %x) {
@@ -262,8 +262,8 @@ define float @fmul_fma_fast2(float %x) {
 ; GLOBAL-NEXT:    lfs 0, .LCPI9_0@toc@l(3)
 ; GLOBAL-NEXT:    xsmulsp 1, 1, 0
 ; GLOBAL-NEXT:    blr
-  %mul = fmul fast float %x, 42.0
-  %fma = call fast float @llvm.fma.f32(float %x, float 7.0, float %mul)
+  %mul = fmul reassoc float %x, 42.0
+  %fma = call reassoc float @llvm.fma.f32(float %x, float 7.0, float %mul)
   ret float %fma
 }
 
@@ -408,11 +408,11 @@ define float @sqrt_afn_preserve_sign_inf(float %x) #1 {
 ; The call is now fully 'fast'. This implies that approximation is allowed.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'sqrt_fast_ieee:'
-; FMFDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; FMFDEBUG:         fmul ninf afn reassoc {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'sqrt_fast_ieee:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'sqrt_fast_ieee:'
-; GLOBALDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; GLOBALDEBUG:         fmul ninf afn reassoc {{t[0-9]+}}
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'sqrt_fast_ieee:'
 
 define float @sqrt_fast_ieee(float %x) #0 {
@@ -459,18 +459,18 @@ define float @sqrt_fast_ieee(float %x) #0 {
 ; GLOBAL-NEXT:  .LBB14_2:
 ; GLOBAL-NEXT:    fmr 1, 0
 ; GLOBAL-NEXT:    blr
-  %rt = call fast float @llvm.sqrt.f32(float %x)
+  %rt = call reassoc afn ninf float @llvm.sqrt.f32(float %x)
   ret float %rt
 }
 
 ; The call is now fully 'fast'. This implies that approximation is allowed.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'sqrt_fast_preserve_sign:'
-; FMFDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; FMFDEBUG:         fmul ninf afn reassoc {{t[0-9]+}}
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'sqrt_fast_preserve_sign:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'sqrt_fast_preserve_sign:'
-; GLOBALDEBUG:         fmul nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}
+; GLOBALDEBUG:         fmul ninf afn reassoc {{t[0-9]+}}
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'sqrt_fast_preserve_sign:'
 
 define float @sqrt_fast_preserve_sign(float %x) #1 {
@@ -511,7 +511,7 @@ define float @sqrt_fast_preserve_sign(float %x) #1 {
 ; GLOBAL-NEXT:  .LBB15_2:
 ; GLOBAL-NEXT:    fmr 1, 0
 ; GLOBAL-NEXT:    blr
-  %rt = call fast float @llvm.sqrt.f32(float %x)
+  %rt = call reassoc ninf afn float @llvm.sqrt.f32(float %x)
   ret float %rt
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/load-two-flts.ll b/llvm/test/CodeGen/PowerPC/load-two-flts.ll
index 584c89cc71f5..1cfcff5e0160 100644
--- a/llvm/test/CodeGen/PowerPC/load-two-flts.ll
+++ b/llvm/test/CodeGen/PowerPC/load-two-flts.ll
@@ -10,12 +10,12 @@ entry:
   %v5 = bitcast i32 %v4 to float
   %v6 = trunc i64 %v2 to i32
   %v7 = bitcast i32 %v6 to float
-  %mul_ad.i.i = fmul fast float %v5, %v1
-  %mul_bc.i.i = fmul fast float %v7, %v0
-  %mul_i.i.i = fadd fast float %mul_ad.i.i, %mul_bc.i.i
-  %mul_ac.i.i = fmul fast float %v5, %v0
-  %mul_bd.i.i = fmul fast float %v7, %v1
-  %mul_r.i.i = fsub fast float %mul_ac.i.i, %mul_bd.i.i
+  %mul_ad.i.i = fmul float %v5, %v1
+  %mul_bc.i.i = fmul float %v7, %v0
+  %mul_i.i.i = fadd float %mul_ad.i.i, %mul_bc.i.i
+  %mul_ac.i.i = fmul float %v5, %v0
+  %mul_bd.i.i = fmul float %v7, %v1
+  %mul_r.i.i = fsub float %mul_ac.i.i, %mul_bd.i.i
   store float %mul_r.i.i, float* %_M_value.realp.i.i, align 4
   store float %mul_i.i.i, float* %_M_value.imagp.i.i, align 4
   ret void
@@ -38,12 +38,12 @@ entry:
   %v5 = bitcast i32 %v4 to float
   %v6 = trunc i64 %v2 to i32
   %v7 = bitcast i32 %v6 to float
-  %mul_ad.i.i = fmul fast float %v5, %v1
-  %mul_bc.i.i = fmul fast float %v7, %v0
-  %mul_i.i.i = fadd fast float %mul_ad.i.i, %mul_bc.i.i
-  %mul_ac.i.i = fmul fast float %v5, %v0
-  %mul_bd.i.i = fmul fast float %v7, %v1
-  %mul_r.i.i = fsub fast float %mul_ac.i.i, %mul_bd.i.i
+  %mul_ad.i.i = fmul float %v5, %v1
+  %mul_bc.i.i = fmul float %v7, %v0
+  %mul_i.i.i = fadd float %mul_ad.i.i, %mul_bc.i.i
+  %mul_ac.i.i = fmul float %v5, %v0
+  %mul_bd.i.i = fmul float %v7, %v1
+  %mul_r.i.i = fsub float %mul_ac.i.i, %mul_bd.i.i
   store float %mul_r.i.i, float* %_M_value.realp.i.i, align 4
   store float %mul_i.i.i, float* %_M_value.imagp.i.i, align 4
   ret i64* %r

diff --git a/llvm/test/CodeGen/PowerPC/pow.75.ll b/llvm/test/CodeGen/PowerPC/pow.75.ll
index c05afd302340..723755ce1546 100644
--- a/llvm/test/CodeGen/PowerPC/pow.75.ll
+++ b/llvm/test/CodeGen/PowerPC/pow.75.ll
@@ -28,21 +28,21 @@ define double @pow_f64_three_fourth_fmf(double %x) nounwind {
 }
 
 define <4 x float> @pow_v4f32_three_fourth_fmf(<4 x float> %x) nounwind {
-; CHECK: Combining: {{.*}}: v4f32 = fpow nnan ninf nsz arcp contract afn reassoc [[X:t[0-9]+]], {{.*}}
-; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: v4f32 = fsqrt nnan ninf nsz arcp contract afn reassoc [[X]]
-; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: v4f32 = fsqrt nnan ninf nsz arcp contract afn reassoc [[SQRT]]
-; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: v4f32 = fmul nnan ninf nsz arcp contract afn reassoc [[SQRT]], [[SQRTSQRT]]
-; CHECK-NEXT:  ... into: [[R]]: v4f32 = fmul nnan ninf nsz arcp contract afn reassoc [[SQRT]], [[SQRTSQRT]]
-  %r = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 7.5e-1, float 7.5e-1, float 7.5e-01, float 7.5e-01>)
+; CHECK: Combining: {{.*}}: v4f32 = fpow ninf afn [[X:t[0-9]+]], {{.*}}
+; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: v4f32 = fsqrt ninf afn [[X]]
+; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: v4f32 = fsqrt ninf afn [[SQRT]]
+; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: v4f32 = fmul ninf afn [[SQRT]], [[SQRTSQRT]]
+; CHECK-NEXT:  ... into: [[R]]: v4f32 = fmul ninf afn [[SQRT]], [[SQRTSQRT]]
+  %r = call ninf afn <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 7.5e-1, float 7.5e-1, float 7.5e-01, float 7.5e-01>)
   ret <4 x float> %r
 }
 
 define <2 x double> @pow_v2f64_three_fourth_fmf(<2 x double> %x) nounwind {
-; CHECK: Combining: {{.*}}: v2f64 = fpow nnan ninf nsz arcp contract afn reassoc [[X:t[0-9]+]], {{.*}}
-; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: v2f64 = fsqrt nnan ninf nsz arcp contract afn reassoc [[X]]
-; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: v2f64 = fsqrt nnan ninf nsz arcp contract afn reassoc [[SQRT]]
-; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: v2f64 = fmul nnan ninf nsz arcp contract afn reassoc [[SQRT]], [[SQRTSQRT]]
-; CHECK-NEXT:  ... into: [[R]]: v2f64 = fmul nnan ninf nsz arcp contract afn reassoc [[SQRT]], [[SQRTSQRT]]
-  %r = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 7.5e-1, double 7.5e-1>)
+; CHECK: Combining: {{.*}}: v2f64 = fpow ninf afn [[X:t[0-9]+]], {{.*}}
+; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: v2f64 = fsqrt ninf afn [[X]]
+; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: v2f64 = fsqrt ninf afn [[SQRT]]
+; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: v2f64 = fmul ninf afn [[SQRT]], [[SQRTSQRT]]
+; CHECK-NEXT:  ... into: [[R]]: v2f64 = fmul ninf afn [[SQRT]], [[SQRTSQRT]]
+  %r = call ninf afn <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 7.5e-1, double 7.5e-1>)
   ret <2 x double> %r
 }

diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
index ace75a76a5dd..feb20465c510 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-setb.ll
@@ -1195,8 +1195,8 @@ define i64 @setbuc(i8 %a, i8 %b) {
 
 ; select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setlt
 define i64 @setbf1(float %a, float %b) {
-  %t1 = fcmp fast olt float %a, %b
-  %t2 = fcmp fast olt float %b, %a
+  %t1 = fcmp nnan olt float %a, %b
+  %t2 = fcmp nnan olt float %b, %a
   %t3 = zext i1 %t2 to i64
   %t4 = select i1 %t1, i64 -1, i64 %t3
   ret i64 %t4
@@ -1215,8 +1215,8 @@ define i64 @setbf1(float %a, float %b) {
 
 ; select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setgt
 define i64 @setbf2(float %a, float %b) {
-  %t1 = fcmp fast ogt float %b, %a
-  %t2 = fcmp fast olt float %b, %a
+  %t1 = fcmp nnan ogt float %b, %a
+  %t2 = fcmp nnan olt float %b, %a
   %t3 = zext i1 %t2 to i64
   %t4 = select i1 %t1, i64 -1, i64 %t3
   ret i64 %t4
@@ -1235,8 +1235,8 @@ define i64 @setbf2(float %a, float %b) {
 
 ; select_cc lhs, rhs, 0, (select_cc lhs, rhs, -1, 1, setgt), seteq
 define i64 @setbdf1(double %a, double %b) {
-  %t1 = fcmp fast oeq double %b, %a
-  %t2 = fcmp fast ogt double %b, %a
+  %t1 = fcmp nnan oeq double %b, %a
+  %t2 = fcmp nnan ogt double %b, %a
   %t3 = select i1 %t2, i64 -1, i64 1
   %t4 = select i1 %t1, i64 0, i64 %t3
   ret i64 %t4
@@ -1254,8 +1254,8 @@ define i64 @setbdf1(double %a, double %b) {
 
 ; select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setlt
 define i64 @setbdf2(double %a, double %b) {
-  %t1 = fcmp fast olt double %b, %a
-  %t2 = fcmp fast ogt double %b, %a
+  %t1 = fcmp nnan olt double %b, %a
+  %t2 = fcmp nnan ogt double %b, %a
   %t3 = sext i1 %t2 to i64
   %t4 = select i1 %t1, i64 1, i64 %t3
   ret i64 %t4
@@ -1274,8 +1274,8 @@ define i64 @setbdf2(double %a, double %b) {
 }
 
 define i64 @setbf128(fp128 %a, fp128 %b) {
-  %t1 = fcmp fast ogt fp128 %a, %b
-  %t2 = fcmp fast olt fp128 %a, %b
+  %t1 = fcmp nnan ogt fp128 %a, %b
+  %t2 = fcmp nnan olt fp128 %a, %b
   %t3 = sext i1 %t2 to i64
   %t4 = select i1 %t1, i64 1, i64 %t3
   ret i64 %t4

diff --git a/llvm/test/CodeGen/PowerPC/qpx-recipest.ll b/llvm/test/CodeGen/PowerPC/qpx-recipest.ll
index 4f3abd2f60d6..7e639e03ad04 100644
--- a/llvm/test/CodeGen/PowerPC/qpx-recipest.ll
+++ b/llvm/test/CodeGen/PowerPC/qpx-recipest.ll
@@ -23,8 +23,8 @@ define <4 x double> @foo_fmf(<4 x double> %a, <4 x double> %b) nounwind {
 ; CHECK-NEXT:    qvfmul 1, 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %x = call fast <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
-  %r = fdiv fast <4 x double> %a, %x
+  %x = call ninf afn reassoc <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+  %r = fdiv arcp reassoc <4 x double> %a, %x
   ret <4 x double> %r
 }
 
@@ -71,9 +71,9 @@ define <4 x double> @foof_fmf(<4 x double> %a, <4 x float> %b) nounwind {
 ; CHECK-NEXT:    qvfmul 1, 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %x = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+  %x = call afn ninf reassoc <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
   %y = fpext <4 x float> %x to <4 x double>
-  %r = fdiv fast <4 x double> %a, %y
+  %r = fdiv arcp reassoc nsz <4 x double> %a, %y
   ret <4 x double> %r
 }
 
@@ -131,9 +131,9 @@ define <4 x float> @food_fmf(<4 x float> %a, <4 x double> %b) nounwind {
 ; CHECK-NEXT:    qvfmuls 1, 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %x = call fast <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
+  %x = call afn ninf reassoc <4 x double> @llvm.sqrt.v4f64(<4 x double> %b)
   %y = fptrunc <4 x double> %x to <4 x float>
-  %r = fdiv fast <4 x float> %a, %y
+  %r = fdiv arcp reassoc <4 x float> %a, %y
   ret <4 x float> %r
 }
 
@@ -188,8 +188,8 @@ define <4 x float> @goo_fmf(<4 x float> %a, <4 x float> %b) nounwind {
 ; CHECK-NEXT:    qvfmuls 1, 1, 0
 ; CHECK-NEXT:    blr
 entry:
-  %x = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
-  %r = fdiv fast <4 x float> %a, %x
+  %x = call afn ninf reassoc <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+  %r = fdiv arcp reassoc nsz <4 x float> %a, %x
   ret <4 x float> %r
 }
 
@@ -236,7 +236,7 @@ define <4 x double> @foo2_fmf(<4 x double> %a, <4 x double> %b) nounwind {
 ; CHECK-NEXT:    qvfmadd 1, 0, 1, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = fdiv fast <4 x double> %a, %b
+  %r = fdiv arcp reassoc nsz <4 x double> %a, %b
   ret <4 x double> %r
 }
 
@@ -272,7 +272,7 @@ define <4 x float> @goo2_fmf(<4 x float> %a, <4 x float> %b) nounwind {
 ; CHECK-NEXT:    qvfmadds 1, 0, 1, 3
 ; CHECK-NEXT:    blr
 entry:
-  %r = fdiv fast <4 x float> %a, %b
+  %r = fdiv arcp reassoc <4 x float> %a, %b
   ret <4 x float> %r
 }
 
@@ -326,7 +326,7 @@ define <4 x double> @foo3_fmf_denorm_on(<4 x double> %a) #0 {
 ; CHECK-NEXT:    qvfsel 1, 1, 3, 0
 ; CHECK-NEXT:    blr
 entry:
-  %r = call fast <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
+  %r = call reassoc ninf afn <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
   ret <4 x double> %r
 }
 
@@ -352,7 +352,7 @@ define <4 x double> @foo3_fmf_denorm_off(<4 x double> %a) #1 {
 ; CHECK-NEXT:    qvfsel 1, 1, 2, 0
 ; CHECK-NEXT:    blr
 entry:
-  %r = call fast <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
+  %r = call afn reassoc ninf <4 x double> @llvm.sqrt.v4f64(<4 x double> %a)
   ret <4 x double> %r
 }
 
@@ -421,7 +421,7 @@ define <4 x float> @goo3_fmf_denorm_on(<4 x float> %a) #0 {
 ; CHECK-NEXT:    qvfsel 1, 1, 4, 0
 ; CHECK-NEXT:    blr
 entry:
-  %r = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+  %r = call reassoc afn ninf nsz <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
   ret <4 x float> %r
 }
 
@@ -444,7 +444,7 @@ define <4 x float> @goo3_fmf_denorm_off(<4 x float> %a) #1 {
 ; CHECK-NEXT:    qvfsel 1, 1, 3, 0
 ; CHECK-NEXT:    blr
 entry:
-  %r = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+  %r = call reassoc ninf afn nsz <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
   ret <4 x float> %r
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/recipest.ll b/llvm/test/CodeGen/PowerPC/recipest.ll
index 7fb9b07152ef..c6b2f396aec0 100644
--- a/llvm/test/CodeGen/PowerPC/recipest.ll
+++ b/llvm/test/CodeGen/PowerPC/recipest.ll
@@ -26,8 +26,8 @@ define double @foo_fmf(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmul 0, 0, 2
 ; CHECK-NEXT:    fmul 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast double @llvm.sqrt.f64(double %b)
-  %r = fdiv fast double %a, %x
+  %x = call arcp reassoc double @llvm.sqrt.f64(double %b)
+  %r = fdiv arcp reassoc double %a, %x
   ret double %r
 }
 
@@ -48,8 +48,8 @@ define double @no_estimate_refinement_f64(double %a, double %b) #0 {
 ; CHECK-NEXT:    frsqrte 0, 2
 ; CHECK-NEXT:    fmul 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast double @llvm.sqrt.f64(double %b)
-  %r = fdiv fast double %a, %x
+  %x = call arcp reassoc double @llvm.sqrt.f64(double %b)
+  %r = fdiv arcp reassoc double %a, %x
   ret double %r
 }
 
@@ -67,9 +67,9 @@ define double @foof_fmf(double %a, float %b) nounwind {
 ; CHECK-NEXT:    fmuls 0, 0, 2
 ; CHECK-NEXT:    fmul 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast float @llvm.sqrt.f32(float %b)
+  %x = call reassoc arcp float @llvm.sqrt.f32(float %b)
   %y = fpext float %x to double
-  %r = fdiv fast double %a, %y
+  %r = fdiv reassoc arcp double %a, %y
   ret double %r
 }
 
@@ -104,9 +104,9 @@ define float @food_fmf(float %a, double %b) nounwind {
 ; CHECK-NEXT:    frsp 0, 0
 ; CHECK-NEXT:    fmuls 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast double @llvm.sqrt.f64(double %b)
+  %x = call reassoc arcp double @llvm.sqrt.f64(double %b)
   %y = fptrunc double %x to float
-  %r = fdiv fast float %a, %y
+  %r = fdiv reassoc arcp float %a, %y
   ret float %r
 }
 
@@ -137,8 +137,8 @@ define float @goo_fmf(float %a, float %b) nounwind {
 ; CHECK-NEXT:    fmuls 0, 0, 2
 ; CHECK-NEXT:    fmuls 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast float @llvm.sqrt.f32(float %b)
-  %r = fdiv fast float %a, %x
+  %x = call reassoc arcp float @llvm.sqrt.f32(float %b)
+  %r = fdiv reassoc arcp float %a, %x
   ret float %r
 }
 
@@ -159,8 +159,8 @@ define float @no_estimate_refinement_f32(float %a, float %b) #0 {
 ; CHECK-NEXT:    frsqrtes 0, 2
 ; CHECK-NEXT:    fmuls 1, 1, 0
 ; CHECK-NEXT:    blr
-  %x = call fast float @llvm.sqrt.f32(float %b)
-  %r = fdiv fast float %a, %x
+  %x = call reassoc arcp float @llvm.sqrt.f32(float %b)
+  %r = fdiv reassoc arcp float %a, %x
   ret float %r
 }
 
@@ -184,9 +184,9 @@ define float @rsqrt_fmul_fmf(float %a, float %b, float %c) {
 ; CHECK-NEXT:    fmadds 0, 1, 0, 4
 ; CHECK-NEXT:    fmuls 1, 3, 0
 ; CHECK-NEXT:    blr
-  %x = call fast float @llvm.sqrt.f32(float %a)
-  %y = fmul fast float %x, %b
-  %z = fdiv fast float %c, %y
+  %x = call reassoc arcp float @llvm.sqrt.f32(float %a)
+  %y = fmul reassoc float %x, %b
+  %z = fdiv reassoc arcp float %c, %y
   ret float %z
 }
 
@@ -223,8 +223,8 @@ define <4 x float> @hoo_fmf(<4 x float> %a, <4 x float> %b) nounwind {
 ; CHECK-NEXT:    vmaddfp 3, 5, 3, 4
 ; CHECK-NEXT:    vmaddfp 2, 2, 3, 4
 ; CHECK-NEXT:    blr
-  %x = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
-  %r = fdiv fast <4 x float> %a, %x
+  %x = call reassoc arcp <4 x float> @llvm.sqrt.v4f32(<4 x float> %b)
+  %r = fdiv reassoc arcp <4 x float> %a, %x
   ret <4 x float> %r
 }
 
@@ -275,7 +275,7 @@ define double @foo2_fmf(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fnmsub 1, 2, 3, 1
 ; CHECK-NEXT:    fmadd 1, 0, 1, 3
 ; CHECK-NEXT:    blr
-  %r = fdiv fast double %a, %b
+  %r = fdiv reassoc arcp nsz double %a, %b
   ret double %r
 }
 
@@ -296,7 +296,7 @@ define float @goo2_fmf(float %a, float %b) nounwind {
 ; CHECK-NEXT:    fnmsubs 1, 2, 3, 1
 ; CHECK-NEXT:    fmadds 1, 0, 1, 3
 ; CHECK-NEXT:    blr
-  %r = fdiv fast float %a, %b
+  %r = fdiv reassoc arcp float %a, %b
   ret float %r
 }
 
@@ -322,7 +322,7 @@ define <4 x float> @hoo2_fmf(<4 x float> %a, <4 x float> %b) nounwind {
 ; CHECK-NEXT:    vmaddfp 2, 3, 4, 2
 ; CHECK-NEXT:    vmaddfp 2, 5, 2, 4
 ; CHECK-NEXT:    blr
-  %r = fdiv fast <4 x float> %a, %b
+  %r = fdiv reassoc arcp <4 x float> %a, %b
   ret <4 x float> %r
 }
 
@@ -383,7 +383,7 @@ define double @foo3_fmf(double %a) nounwind {
 ; CHECK-NEXT:    addis 3, 2, .LCPI20_3@toc@ha
 ; CHECK-NEXT:    lfs 1, .LCPI20_3@toc@l(3)
 ; CHECK-NEXT:    blr
-  %r = call fast double @llvm.sqrt.f64(double %a)
+  %r = call reassoc ninf afn double @llvm.sqrt.f64(double %a)
   ret double %r
 }
 
@@ -419,7 +419,7 @@ define float @goo3_fmf(float %a) nounwind {
 ; CHECK-NEXT:    addis 3, 2, .LCPI22_3@toc@ha
 ; CHECK-NEXT:    lfs 1, .LCPI22_3@toc@l(3)
 ; CHECK-NEXT:    blr
-  %r = call fast float @llvm.sqrt.f32(float %a)
+  %r = call reassoc ninf afn float @llvm.sqrt.f32(float %a)
   ret float %r
 }
 
@@ -452,7 +452,7 @@ define <4 x float> @hoo3_fmf(<4 x float> %a) #1 {
 ; CHECK-NEXT:    vcmpeqfp 2, 2, 0
 ; CHECK-NEXT:    vsel 2, 3, 0, 2
 ; CHECK-NEXT:    blr
-  %r = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
+  %r = call reassoc ninf afn <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
   ret <4 x float> %r
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll b/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll
index 2cdf832838a8..3c7a9b94a891 100644
--- a/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll
+++ b/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll
@@ -20,8 +20,8 @@ define <4 x float> @repeated_fp_divisor(float %a, <4 x float> %b) {
 ; CHECK-NEXT:    blr
   %ins = insertelement <4 x float> undef, float %a, i32 0
   %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
-  %t1 = fmul fast <4 x float> %b, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0x3FF028F5C0000000>
-  %mul = fdiv fast <4 x float> %t1, %splat
+  %t1 = fmul reassoc <4 x float> %b, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 0x3FF028F5C0000000>
+  %mul = fdiv reassoc arcp nsz <4 x float> %t1, %splat
   ret <4 x float> %mul
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
index 41dabb9c8e16..90e1655fd94c 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
@@ -90,7 +90,7 @@ define double @testoeq_fast(double %a, double %b, double %c, double %d) {
 ; NO-FAST-P8-NEXT:    fsel f1, f1, f0, f4
 ; NO-FAST-P8-NEXT:    blr
 entry:
-  %cmp = fcmp fast oeq double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz oeq double %a, %b
+  %cond = select nnan ninf nsz i1 %cmp, double %c, double %d
   ret double %cond
 }

diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
index d5e4a59277cc..216d498e8541 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
@@ -126,7 +126,7 @@ define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr {
 ; NO-FAST-P8-NEXT:    fsel f1, f0, f2, f1
 ; NO-FAST-P8-NEXT:    blr
 entry:
-  %cmp = fcmp fast ogt float %a, %b
+  %cmp = fcmp nnan ninf ogt float %a, %b
   %cond = select i1 %cmp, float %a, float %b
   ret float %cond
 }
@@ -147,7 +147,7 @@ define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr
 ; NO-FAST-P8-NEXT:    fsel f1, f0, f2, f1
 ; NO-FAST-P8-NEXT:    blr
 entry:
-  %cmp = fcmp fast ogt double %a, %b
+  %cmp = fcmp nnan ninf ogt double %a, %b
   %cond = select i1 %cmp, double %a, double %b
   ret double %cond
 }
@@ -168,7 +168,7 @@ define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr {
 ; NO-FAST-P8-NEXT:    fsel f1, f0, f2, f1
 ; NO-FAST-P8-NEXT:    blr
 entry:
-  %cmp = fcmp fast olt float %a, %b
+  %cmp = fcmp nnan ninf olt float %a, %b
   %cond = select i1 %cmp, float %a, float %b
   ret float %cond
 }
@@ -189,7 +189,7 @@ define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr
 ; NO-FAST-P8-NEXT:    fsel f1, f0, f2, f1
 ; NO-FAST-P8-NEXT:    blr
 entry:
-  %cmp = fcmp fast olt double %a, %b
+  %cmp = fcmp nnan ninf olt double %a, %b
   %cond = select i1 %cmp, double %a, double %b
   ret double %cond
 }

diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index 1fe0ec758587..c827d242e2c4 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -132,8 +132,8 @@ define float @select_fast_oeq_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast oeq float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp nnan ninf nsz oeq float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -170,8 +170,8 @@ define double @select_fast_oeq_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast oeq double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz oeq double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 
@@ -296,8 +296,8 @@ define float @select_fast_one_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast one float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp nnan ninf nsz one float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -334,8 +334,8 @@ define double @select_fast_one_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f1, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast one double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz one double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 
@@ -444,8 +444,8 @@ define float @select_fast_oge_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast oge float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp nnan ninf nsz oge float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -474,8 +474,8 @@ define double @select_fast_oge_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast oge double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz oge double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 
@@ -580,8 +580,8 @@ define float @select_fast_olt_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast olt float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp ninf nnan nsz olt float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -610,8 +610,8 @@ define double @select_fast_olt_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast olt double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz olt double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 
@@ -716,8 +716,8 @@ define float @select_fast_ogt_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast ogt float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp nnan ninf nsz ogt float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -746,8 +746,8 @@ define double @select_fast_ogt_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f4, f3
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast ogt double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz ogt double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 
@@ -856,8 +856,8 @@ define float @select_fast_ole_float(float %a, float %b, float %c, float %d) {
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast ole float %a, %b
-  %cond = select fast i1 %cmp, float %c, float %d
+  %cmp = fcmp nnan ninf nsz ole float %a, %b
+  %cond = select i1 %cmp, float %c, float %d
   ret float %cond
 }
 
@@ -886,8 +886,8 @@ define double @select_fast_ole_double(double %a, double %b, double %c, double %d
 ; NO-FAST-P9-NEXT:    fsel f1, f0, f3, f4
 ; NO-FAST-P9-NEXT:    blr
 entry:
-  %cmp = fcmp fast ole double %a, %b
-  %cond = select fast i1 %cmp, double %c, double %d
+  %cmp = fcmp nnan ninf nsz ole double %a, %b
+  %cond = select i1 %cmp, double %c, double %d
   ret double %cond
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/vec-min-max.ll b/llvm/test/CodeGen/PowerPC/vec-min-max.ll
index 23ab95d64559..c544524cd69b 100644
--- a/llvm/test/CodeGen/PowerPC/vec-min-max.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-min-max.ll
@@ -99,7 +99,7 @@ define <4 x float> @getsmaxf32(<4 x float> %a, <4 x float> %b) {
 ; NOP8VEC-NEXT:    xvmaxsp 34, 34, 35
 ; NOP8VEC-NEXT:    blr
 entry:
-  %0 = fcmp fast oge <4 x float> %a, %b
+  %0 = fcmp nnan nsz oge <4 x float> %a, %b
   %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
   ret <4 x float> %1
 }
@@ -115,7 +115,7 @@ define <2 x double> @getsmaxf64(<2 x double> %a, <2 x double> %b) {
 ; NOP8VEC-NEXT:    xvmaxdp 34, 34, 35
 ; NOP8VEC-NEXT:    blr
 entry:
-  %0 = fcmp fast oge <2 x double> %a, %b
+  %0 = fcmp nnan nsz oge <2 x double> %a, %b
   %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
   ret <2 x double> %1
 }
@@ -216,7 +216,7 @@ define <4 x float> @getsminf32(<4 x float> %a, <4 x float> %b) {
 ; NOP8VEC-NEXT:    xvminsp 34, 34, 35
 ; NOP8VEC-NEXT:    blr
 entry:
-  %0 = fcmp fast ole <4 x float> %a, %b
+  %0 = fcmp nnan nsz ole <4 x float> %a, %b
   %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
   ret <4 x float> %1
 }
@@ -232,7 +232,7 @@ define <2 x double> @getsminf64(<2 x double> %a, <2 x double> %b) {
 ; NOP8VEC-NEXT:    xvmindp 34, 34, 35
 ; NOP8VEC-NEXT:    blr
 entry:
-  %0 = fcmp fast ole <2 x double> %a, %b
+  %0 = fcmp nnan nsz ole <2 x double> %a, %b
   %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
   ret <2 x double> %1
 }

diff --git a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
index 5a838886dde1..9809287021f6 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-mutate-trivial-copy.ll
@@ -14,11 +14,11 @@ for.body.lr.ph:                                   ; preds = %entry
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
-  %div = fdiv fast float 0.000000e+00, %W
-  %add = fadd fast float %div, %d_min
+  %div = fdiv reassoc arcp float 0.000000e+00, %W
+  %add = fadd reassoc float %div, %d_min
   %conv2 = fpext float %add to double
   %0 = tail call double @llvm.sqrt.f64(double %conv2)
-  %div4 = fdiv fast double %conv3, %0
+  %div4 = fdiv reassoc arcp double %conv3, %0
   %call = tail call signext i32 bitcast (i32 (...)* @p_col_helper to i32 (double)*)(double %div4) #2
   br label %for.body
 

diff --git a/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
index e1d2cdc5e9cf..f6a6a3172235 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
@@ -10,7 +10,7 @@ define float @emit_xsresp() {
 entry:
   %0 = load float, float* @a, align 4
   %1 = load float, float* @b, align 4
-  %div = fdiv fast float %0, %1
+  %div = fdiv arcp float %0, %1
   ret float %div
 ; CHECK-LABEL: @emit_xsresp
 ; CHECK: xsresp {{[0-9]+}}
@@ -24,7 +24,7 @@ entry:
   %0 = load float, float* %f.addr, align 4
   %1 = load float, float* @b, align 4
   %2 = call float @llvm.sqrt.f32(float %1)
-  %div = fdiv fast float %0, %2
+  %div = fdiv arcp float %0, %2
   ret float %div
 ; CHECK-LABEL: @emit_xsrsqrtesp
 ; CHECK: xsrsqrtesp {{[0-9]+}}
@@ -38,7 +38,7 @@ define double @emit_xsredp() {
 entry:
   %0 = load double, double* @c, align 8
   %1 = load double, double* @d, align 8
-  %div = fdiv fast double %0, %1
+  %div = fdiv arcp double %0, %1
   ret double %div
 ; CHECK-LABEL: @emit_xsredp
 ; CHECK: xsredp {{[0-9]+}}
@@ -52,7 +52,7 @@ entry:
   %0 = load double, double* %f.addr, align 8
   %1 = load double, double* @d, align 8
   %2 = call double @llvm.sqrt.f64(double %1)
-  %div = fdiv fast double %0, %2
+  %div = fdiv arcp double %0, %2
   ret double %div
 ; CHECK-LABEL: @emit_xsrsqrtedp
 ; CHECK: xsrsqrtedp {{[0-9]+}}


        

