[llvm] 9774308 - [SystemZ] Avoid mixing strict and non-strict FP operations in tests

Ulrich Weigand via llvm-commits <llvm-commits at lists.llvm.org>
Wed Nov 20 10:51:50 PST 2019


Author: Ulrich Weigand
Date: 2019-11-20T19:51:30+01:00
New Revision: 97743089bffba797d7bc7358ddb852b7b050b0c8

URL: https://github.com/llvm/llvm-project/commit/97743089bffba797d7bc7358ddb852b7b050b0c8
DIFF: https://github.com/llvm/llvm-project/commit/97743089bffba797d7bc7358ddb852b7b050b0c8.diff

LOG: [SystemZ] Avoid mixing strict and non-strict FP operations in tests

This prepares for having the IR verifier reject functions that mix
strict and non-strict FP operations. Note that fp-strict-mul-02.ll and
fp-strict-mul-04.ll are not yet fully fixed: they still use plain fpext,
since ISel currently cannot match a combination of two strict operations
(see the FIXME comments in those tests).
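
For illustration (not part of this commit), a minimal sketch of what a
"mixed" function looks like versus the fully strict form the tests now
use; the function names @mixed and @strict are hypothetical:

; Mixed: a strictfp function that still uses a plain (non-strict) fpext
; alongside a constrained intrinsic. This is the pattern the IR verifier
; is intended to reject.
define double @mixed(float %x) strictfp {
  %e = fpext float %x to double
  %r = call double @llvm.experimental.constrained.fadd.f64(
                        double %e, double %e,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %r
}

; Fully strict: the fpext is replaced by its constrained equivalent.
define double @strict(float %x) strictfp {
  %e = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
                        metadata !"fpexcept.strict") #0
  %r = call double @llvm.experimental.constrained.fadd.f64(
                        double %e, double %e,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %r
}

declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)

attributes #0 = { strictfp }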

Added: 
    

Modified: 
    llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
    llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
    llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
    llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
    llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
    llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
    llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
index 0aeef7c25453..13710a1f0881 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
 
 ; There is no memory form of 128-bit addition.
 define void @f1(fp128 *%ptr, float %f2) strictfp {
@@ -15,11 +16,14 @@ define void @f1(fp128 *%ptr, float %f2) strictfp {
 ; CHECK: std %f2, 8(%r2)
 ; CHECK: br %r14
   %f1 = load fp128, fp128 *%ptr
-  %f2x = fpext float %f2 to fp128
+  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %f2,
+                                               metadata !"fpexcept.strict") #0
   %sum = call fp128 @llvm.experimental.constrained.fadd.f128(
                         fp128 %f1, fp128 %f2x,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict") strictfp
+                        metadata !"fpexcept.strict") #0
   store fp128 %sum, fp128 *%ptr
   ret void
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
index fcd2184ac4fe..a53512eeb51e 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
 
 ; There is no memory form of 128-bit division.
 define void @f1(fp128 *%ptr, float %f2) strictfp {
@@ -15,11 +16,14 @@ define void @f1(fp128 *%ptr, float %f2) strictfp {
 ; CHECK: std %f3, 8(%r2)
 ; CHECK: br %r14
   %f1 = load fp128, fp128 *%ptr
-  %f2x = fpext float %f2 to fp128
+  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %f2,
+                                               metadata !"fpexcept.strict") #0
   %sum = call fp128 @llvm.experimental.constrained.fdiv.f128(
                         fp128 %f1, fp128 %f2x,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict") strictfp
+                        metadata !"fpexcept.strict") #0
   store fp128 %sum, fp128 *%ptr
   ret void
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
index 7acabef29f4e..33e865d3d93a 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
@@ -1,10 +1,13 @@
 ; Test strict multiplication of two f32s, producing an f64 result.
-; FIXME: we do not have a strict version of fpext yet
+; FIXME: We should use llvm.experimental.constrained.fpext, but we currently
+;        cannot match a combination of two strict operations in ISel.
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare float @foo()
 declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
 
 ; Check register multiplication.
 define double @f1(float %f1, float %f2) #0 {
@@ -133,17 +136,50 @@ define float @f7(float *%ptr0) #0 {
   %val9 = load float, float *%ptr9
   %val10 = load float, float *%ptr10
 
-  %frob0 = fadd float %val0, %val0
-  %frob1 = fadd float %val1, %val1
-  %frob2 = fadd float %val2, %val2
-  %frob3 = fadd float %val3, %val3
-  %frob4 = fadd float %val4, %val4
-  %frob5 = fadd float %val5, %val5
-  %frob6 = fadd float %val6, %val6
-  %frob7 = fadd float %val7, %val7
-  %frob8 = fadd float %val8, %val8
-  %frob9 = fadd float %val9, %val9
-  %frob10 = fadd float %val9, %val10
+  %frob0 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val0, float %val0,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob1 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val1, float %val1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob2 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val2, float %val2,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob3 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val3, float %val3,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob4 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val4, float %val4,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob5 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val5, float %val5,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob6 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val6, float %val6,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob7 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val7, float %val7,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob8 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val8, float %val8,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob9 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val9, float %val9,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob10 = call float @llvm.experimental.constrained.fadd.f32(
+                        float %val10, float %val10,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   store float %frob0, float *%ptr0
   store float %frob1, float *%ptr1
@@ -169,7 +205,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul0, double 1.01,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc0 = fptrunc double %extra0 to float
+  %trunc0 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra0,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext1 = fpext float %trunc0 to double
   %ext1 = fpext float %frob1 to double
@@ -181,7 +220,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul1, double 1.11,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc1 = fptrunc double %extra1 to float
+  %trunc1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext2 = fpext float %trunc1 to double
   %ext2 = fpext float %frob2 to double
@@ -193,7 +235,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul2, double 1.21,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc2 = fptrunc double %extra2 to float
+  %trunc2 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra2,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext3 = fpext float %trunc2 to double
   %ext3 = fpext float %frob3 to double
@@ -205,7 +250,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul3, double 1.31,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc3 = fptrunc double %extra3 to float
+  %trunc3 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra3,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext4 = fpext float %trunc3 to double
   %ext4 = fpext float %frob4 to double
@@ -217,7 +265,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul4, double 1.41,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc4 = fptrunc double %extra4 to float
+  %trunc4 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra4,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext5 = fpext float %trunc4 to double
   %ext5 = fpext float %frob5 to double
@@ -229,7 +280,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul5, double 1.51,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc5 = fptrunc double %extra5 to float
+  %trunc5 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra5,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext6 = fpext float %trunc5 to double
   %ext6 = fpext float %frob6 to double
@@ -241,7 +295,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul6, double 1.61,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc6 = fptrunc double %extra6 to float
+  %trunc6 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra6,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext7 = fpext float %trunc6 to double
   %ext7 = fpext float %frob7 to double
@@ -253,7 +310,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul7, double 1.71,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc7 = fptrunc double %extra7 to float
+  %trunc7 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra7,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext8 = fpext float %trunc7 to double
   %ext8 = fpext float %frob8 to double
@@ -265,7 +325,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul8, double 1.81,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc8 = fptrunc double %extra8 to float
+  %trunc8 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra8,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   %accext9 = fpext float %trunc8 to double
   %ext9 = fpext float %frob9 to double
@@ -277,7 +340,10 @@ define float @f7(float *%ptr0) #0 {
                         double %mul9, double 1.91,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc9 = fptrunc double %extra9 to float
+  %trunc9 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %extra9,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   ret float %trunc9
 }

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
index 924845a99d74..9a8c868ad15a 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
@@ -1,9 +1,12 @@
 ; Test strict multiplication of two f64s, producing an f128 result.
-; FIXME: we do not have a strict version of fpext yet
+; FIXME: We should use llvm.experimental.constrained.fpext, but we currently
+;        cannot match a combination of two strict operations in ISel.
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata)
 
 declare double @foo()
 
@@ -154,17 +157,50 @@ define double @f7(double *%ptr0) #0 {
   %val9 = load double, double *%ptr9
   %val10 = load double, double *%ptr10
 
-  %frob0 = fadd double %val0, %val0
-  %frob1 = fadd double %val1, %val1
-  %frob2 = fadd double %val2, %val2
-  %frob3 = fadd double %val3, %val3
-  %frob4 = fadd double %val4, %val4
-  %frob5 = fadd double %val5, %val5
-  %frob6 = fadd double %val6, %val6
-  %frob7 = fadd double %val7, %val7
-  %frob8 = fadd double %val8, %val8
-  %frob9 = fadd double %val9, %val9
-  %frob10 = fadd double %val9, %val10
+  %frob0 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val0, double %val0,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob1 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val1, double %val1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob2 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val2, double %val2,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob3 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val3, double %val3,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob4 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val4, double %val4,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob5 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val5, double %val5,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob6 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val6, double %val6,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob7 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val7, double %val7,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob8 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val8, double %val8,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob9 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val9, double %val9,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %frob10 = call double @llvm.experimental.constrained.fadd.f64(
+                        double %val10, double %val10,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
 
   store double %frob0, double *%ptr0
   store double %frob1, double *%ptr1
@@ -186,12 +222,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext0, fp128 %ext0,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const0 = fpext double 1.01 to fp128
   %extra0 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul0, fp128 %const0,
+                        fp128 %mul0, fp128 0xL00000000000000003fff000001000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc0 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra0,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc0 = fptrunc fp128 %extra0 to double
 
   %accext1 = fpext double %trunc0 to fp128
   %ext1 = fpext double %frob1 to fp128
@@ -199,12 +237,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext1, fp128 %ext1,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const1 = fpext double 1.11 to fp128
   %extra1 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul1, fp128 %const1,
+                        fp128 %mul1, fp128 0xL00000000000000003fff000002000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc1 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra1,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc1 = fptrunc fp128 %extra1 to double
 
   %accext2 = fpext double %trunc1 to fp128
   %ext2 = fpext double %frob2 to fp128
@@ -212,12 +252,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext2, fp128 %ext2,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const2 = fpext double 1.21 to fp128
   %extra2 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul2, fp128 %const2,
+                        fp128 %mul2, fp128 0xL00000000000000003fff000003000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc2 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra2,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc2 = fptrunc fp128 %extra2 to double
 
   %accext3 = fpext double %trunc2 to fp128
   %ext3 = fpext double %frob3 to fp128
@@ -225,12 +267,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext3, fp128 %ext3,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const3 = fpext double 1.31 to fp128
   %extra3 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul3, fp128 %const3,
+                        fp128 %mul3, fp128 0xL00000000000000003fff000004000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc3 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra3,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc3 = fptrunc fp128 %extra3 to double
 
   %accext4 = fpext double %trunc3 to fp128
   %ext4 = fpext double %frob4 to fp128
@@ -238,12 +282,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext4, fp128 %ext4,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const4 = fpext double 1.41 to fp128
   %extra4 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul4, fp128 %const4,
+                        fp128 %mul4, fp128 0xL00000000000000003fff000005000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc4 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra4,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc4 = fptrunc fp128 %extra4 to double
 
   %accext5 = fpext double %trunc4 to fp128
   %ext5 = fpext double %frob5 to fp128
@@ -251,12 +297,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext5, fp128 %ext5,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const5 = fpext double 1.51 to fp128
   %extra5 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul5, fp128 %const5,
+                        fp128 %mul5, fp128 0xL00000000000000003fff000006000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc5 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra5,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc5 = fptrunc fp128 %extra5 to double
 
   %accext6 = fpext double %trunc5 to fp128
   %ext6 = fpext double %frob6 to fp128
@@ -264,12 +312,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext6, fp128 %ext6,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const6 = fpext double 1.61 to fp128
   %extra6 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul6, fp128 %const6,
+                        fp128 %mul6, fp128 0xL00000000000000003fff000007000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc6 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra6,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc6 = fptrunc fp128 %extra6 to double
 
   %accext7 = fpext double %trunc6 to fp128
   %ext7 = fpext double %frob7 to fp128
@@ -277,12 +327,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext7, fp128 %ext7,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const7 = fpext double 1.71 to fp128
   %extra7 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul7, fp128 %const7,
+                        fp128 %mul7, fp128 0xL00000000000000003fff000008000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc7 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra7,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc7 = fptrunc fp128 %extra7 to double
 
   %accext8 = fpext double %trunc7 to fp128
   %ext8 = fpext double %frob8 to fp128
@@ -290,12 +342,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext8, fp128 %ext8,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const8 = fpext double 1.81 to fp128
   %extra8 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul8, fp128 %const8,
+                        fp128 %mul8, fp128 0xL00000000000000003fff000009000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc8 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra8,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc8 = fptrunc fp128 %extra8 to double
 
   %accext9 = fpext double %trunc8 to fp128
   %ext9 = fpext double %frob9 to fp128
@@ -303,12 +357,14 @@ define double @f7(double *%ptr0) #0 {
                         fp128 %accext9, fp128 %ext9,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %const9 = fpext double 1.91 to fp128
   %extra9 = call fp128 @llvm.experimental.constrained.fmul.f128(
-                        fp128 %mul9, fp128 %const9,
+                        fp128 %mul9, fp128 0xL00000000000000003fff00000a000000,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict") #0
+  %trunc9 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %extra9,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
-  %trunc9 = fptrunc fp128 %extra9 to double
 
   ret double %trunc9
 }

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
index 0a8ee0bf7bd6..422566c8645b 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
 
 ; There is no memory form of 128-bit multiplication.
 define void @f1(fp128 *%ptr, float %f2) strictfp {
@@ -15,11 +16,14 @@ define void @f1(fp128 *%ptr, float %f2) strictfp {
 ; CHECK: std %f2, 8(%r2)
 ; CHECK: br %r14
   %f1 = load fp128, fp128 *%ptr
-  %f2x = fpext float %f2 to fp128
+  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %f2,
+                                               metadata !"fpexcept.strict") #0
  %diff = call fp128 @llvm.experimental.constrained.fmul.f128(
                         fp128 %f1, fp128 %f2x,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict") strictfp
+                        metadata !"fpexcept.strict") #0
  store fp128 %diff, fp128 *%ptr
   ret void
 }
+
+attributes #0 = { strictfp }

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
index 58e5bc453e61..6ad75c302b2f 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
 
 define void @f1(fp128 *%ptr1, fp128 *%ptr2) #0 {
 ; CHECK-LABEL: f1:
@@ -28,8 +29,10 @@ define void @f2(double %f1, double %f2, fp128 *%dst) #0 {
 ; CHECK: wfmxb [[RES:%v[0-9]+]], [[REG1]], [[REG2]]
 ; CHECK: vst [[RES]], 0(%r2)
 ; CHECK: br %r14
-  %f1x = fpext double %f1 to fp128
-  %f2x = fpext double %f2 to fp128
+  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %f1,
+                                               metadata !"fpexcept.strict") #0
+  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %f2,
+                                               metadata !"fpexcept.strict") #0
   %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                         fp128 %f1x, fp128 %f2x,
                         metadata !"round.dynamic",

diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
index cc3ee09e3a24..d5ab4caf0db8 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
 declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
 
 ; There is no memory form of 128-bit subtraction.
 define void @f1(fp128 *%ptr, float %f2) strictfp {
@@ -15,11 +16,14 @@ define void @f1(fp128 *%ptr, float %f2) strictfp {
 ; CHECK: std %f3, 8(%r2)
 ; CHECK: br %r14
   %f1 = load fp128, fp128 *%ptr
-  %f2x = fpext float %f2 to fp128
+  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %f2,
+                                               metadata !"fpexcept.strict") #0
   %sum = call fp128 @llvm.experimental.constrained.fsub.f128(
                         fp128 %f1, fp128 %f2x,
                         metadata !"round.dynamic",
-                        metadata !"fpexcept.strict") strictfp
+                        metadata !"fpexcept.strict") #0
   store fp128 %sum, fp128 *%ptr
   ret void
 }
+
+attributes #0 = { strictfp }
