[llvm] cd74ccc - [X86] Fix errors in use of strictfp attribute.

Kevin P. Neal via llvm-commits llvm-commits at lists.llvm.org
Fri May 29 09:32:44 PDT 2020


Author: Kevin P. Neal
Date: 2020-05-29T12:31:55-04:00
New Revision: cd74ccc965e773a3f0c31cd6bb46de318cefdca9

URL: https://github.com/llvm/llvm-project/commit/cd74ccc965e773a3f0c31cd6bb46de318cefdca9
DIFF: https://github.com/llvm/llvm-project/commit/cd74ccc965e773a3f0c31cd6bb46de318cefdca9.diff

LOG: [X86] Fix errors in use of strictfp attribute.

Errors spotted with use of: https://reviews.llvm.org/D68233

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fp-intrinsics.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
    llvm/test/CodeGen/X86/vec-strict-128.ll
    llvm/test/CodeGen/X86/vec-strict-256.ll
    llvm/test/CodeGen/X86/vec-strict-512.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
    llvm/test/CodeGen/X86/vec-strict-round-128.ll
    llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
    llvm/test/CodeGen/X86/vector-half-conversions.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 27f198168e38..657731c231c6 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -1762,13 +1762,14 @@ entry:
   ret i64 %result
 }
 
-define i64 @f26(float %x) {
+define i64 @f26(float %x) #0 {
 ; X87-LABEL: f26:
 ; X87:       # %bb.0: # %entry
 ; X87-NEXT:    subl $12, %esp
 ; X87-NEXT:    .cfi_def_cfa_offset 16
 ; X87-NEXT:    flds {{[0-9]+}}(%esp)
 ; X87-NEXT:    fstps (%esp)
+; X87-NEXT:    wait
 ; X87-NEXT:    calll llrintf
 ; X87-NEXT:    addl $12, %esp
 ; X87-NEXT:    .cfi_def_cfa_offset 4

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
index 7bee1340a774..7f9e57d94f73 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
@@ -4206,7 +4206,7 @@ define void @foo(float %0, float %1) #0 {
   br i1 %3, label %4, label %5
 
 4:                                                ; preds = %2
-  tail call void @bar()
+  tail call void @bar() #0
   br label %5
 
 5:                                                ; preds = %4, %2

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
index 26137bd76a9f..da05e8be432e 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
@@ -54,7 +54,7 @@ define float @fceil32(float %f) #0 {
 ; AVX-X64-NEXT:    vroundss $10, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call float @llvm.experimental.constrained.ceil.f32(
-                        float %f, metadata !"fpexcept.strict")
+                        float %f, metadata !"fpexcept.strict") #0
   ret float %res
 }
 
@@ -107,7 +107,7 @@ define double @fceilf64(double %f) #0 {
 ; AVX-X64-NEXT:    vroundsd $10, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call double @llvm.experimental.constrained.ceil.f64(
-                        double %f, metadata !"fpexcept.strict")
+                        double %f, metadata !"fpexcept.strict") #0
   ret double %res
 }
 
@@ -148,7 +148,7 @@ define float @ffloor32(float %f) #0 {
 ; AVX-X64-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call float @llvm.experimental.constrained.floor.f32(
-                        float %f, metadata !"fpexcept.strict")
+                        float %f, metadata !"fpexcept.strict") #0
   ret float %res
 }
 
@@ -201,7 +201,7 @@ define double @ffloorf64(double %f) #0 {
 ; AVX-X64-NEXT:    vroundsd $9, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call double @llvm.experimental.constrained.floor.f64(
-                        double %f, metadata !"fpexcept.strict")
+                        double %f, metadata !"fpexcept.strict") #0
   ret double %res
 }
 
@@ -242,7 +242,7 @@ define float @ftrunc32(float %f) #0 {
 ; AVX-X64-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f, metadata !"fpexcept.strict")
+                        float %f, metadata !"fpexcept.strict") #0
   ret float %res
 }
 
@@ -295,7 +295,7 @@ define double @ftruncf64(double %f) #0 {
 ; AVX-X64-NEXT:    vroundsd $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
   %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f, metadata !"fpexcept.strict")
+                        double %f, metadata !"fpexcept.strict") #0
   ret double %res
 }
 
@@ -337,7 +337,7 @@ define float @frint32(float %f) #0 {
 ; AVX-X64-NEXT:    retq
   %res = call float @llvm.experimental.constrained.rint.f32(
                         float %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret float %res
 }
 
@@ -391,7 +391,7 @@ define double @frintf64(double %f) #0 {
 ; AVX-X64-NEXT:    retq
   %res = call double @llvm.experimental.constrained.rint.f64(
                         double %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret double %res
 }
 
@@ -433,7 +433,7 @@ define float @fnearbyint32(float %f) #0 {
 ; AVX-X64-NEXT:    retq
   %res = call float @llvm.experimental.constrained.nearbyint.f32(
                         float %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret float %res
 }
 
@@ -487,7 +487,7 @@ define double @fnearbyintf64(double %f) #0 {
 ; AVX-X64-NEXT:    retq
   %res = call double @llvm.experimental.constrained.nearbyint.f64(
                         double %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret double %res
 }
 

diff --git a/llvm/test/CodeGen/X86/vec-strict-128.ll b/llvm/test/CodeGen/X86/vec-strict-128.ll
index 98162a1da9a9..4fecd2631dc0 100644
--- a/llvm/test/CodeGen/X86/vec-strict-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-128.ll
@@ -199,7 +199,7 @@ define <4 x float> @f11(<2 x double> %a0, <4 x float> %a1) #0 {
   %ext = extractelement <2 x double> %a0, i32 0
   %cvt = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %ext,
                                                                    metadata !"round.dynamic",
-                                                                   metadata !"fpexcept.strict")
+                                                                   metadata !"fpexcept.strict") #0
   %res = insertelement <4 x float> %a1, float %cvt, i32 0
   ret <4 x float> %res
 }

diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll
index 97980a4f05da..5945e6c1bc66 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256.ll
@@ -194,7 +194,7 @@ define <8 x float> @fceilv8f32(<8 x float> %f) #0 {
 ; CHECK-NEXT:    vroundps $10, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.experimental.constrained.ceil.v8f32(
-                          <8 x float> %f, metadata !"fpexcept.strict")
+                          <8 x float> %f, metadata !"fpexcept.strict") #0
   ret <8 x float> %res
 }
 
@@ -204,7 +204,7 @@ define <4 x double> @fceilv4f64(<4 x double> %f) #0 {
 ; CHECK-NEXT:    vroundpd $10, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(
-                        <4 x double> %f, metadata !"fpexcept.strict")
+                        <4 x double> %f, metadata !"fpexcept.strict") #0
   ret <4 x double> %res
 }
 
@@ -214,7 +214,7 @@ define <8 x float> @ffloorv8f32(<8 x float> %f) #0 {
 ; CHECK-NEXT:    vroundps $9, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.experimental.constrained.floor.v8f32(
-                          <8 x float> %f, metadata !"fpexcept.strict")
+                          <8 x float> %f, metadata !"fpexcept.strict") #0
   ret <8 x float> %res
 }
 
@@ -224,7 +224,7 @@ define <4 x double> @ffloorv4f64(<4 x double> %f) #0 {
 ; CHECK-NEXT:    vroundpd $9, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.experimental.constrained.floor.v4f64(
-                        <4 x double> %f, metadata !"fpexcept.strict")
+                        <4 x double> %f, metadata !"fpexcept.strict") #0
   ret <4 x double> %res
 }
 
@@ -235,7 +235,7 @@ define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
 ; CHECK-NEXT:    vroundps $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
-                          <8 x float> %f, metadata !"fpexcept.strict")
+                          <8 x float> %f, metadata !"fpexcept.strict") #0
   ret <8 x float> %res
 }
 
@@ -245,7 +245,7 @@ define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
 ; CHECK-NEXT:    vroundpd $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
-                        <4 x double> %f, metadata !"fpexcept.strict")
+                        <4 x double> %f, metadata !"fpexcept.strict") #0
   ret <4 x double> %res
 }
 
@@ -257,7 +257,7 @@ define <8 x float> @frintv8f32(<8 x float> %f) #0 {
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.experimental.constrained.rint.v8f32(
                           <8 x float> %f,
-                          metadata !"round.dynamic", metadata !"fpexcept.strict")
+                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <8 x float> %res
 }
 
@@ -268,7 +268,7 @@ define <4 x double> @frintv4f64(<4 x double> %f) #0 {
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
                         <4 x double> %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <4 x double> %res
 }
 
@@ -280,7 +280,7 @@ define <8 x float> @fnearbyintv8f32(<8 x float> %f) #0 {
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(
                           <8 x float> %f,
-                          metadata !"round.dynamic", metadata !"fpexcept.strict")
+                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <8 x float> %res
 }
 
@@ -291,7 +291,7 @@ define <4 x double> @fnearbyintv4f64(<4 x double> %f) #0 {
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
                         <4 x double> %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <4 x double> %res
 }
 

diff --git a/llvm/test/CodeGen/X86/vec-strict-512.ll b/llvm/test/CodeGen/X86/vec-strict-512.ll
index b2a2c7efbdf6..2cafd74af495 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512.ll
@@ -191,7 +191,7 @@ define <16 x float> @strict_vector_fceil_v16f32(<16 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $10, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %f, metadata !"fpexcept.strict")
+  %res = call <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
   ret <16 x float> %res
 }
 
@@ -200,7 +200,7 @@ define <8 x double> @strict_vector_fceil_v8f64(<8 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $10, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %f, metadata !"fpexcept.strict")
+  %res = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
   ret <8 x double> %res
 }
 
@@ -209,7 +209,7 @@ define <16 x float> @strict_vector_ffloor_v16f32(<16 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $9, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %f, metadata !"fpexcept.strict")
+  %res = call <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
   ret <16 x float> %res
 }
 
@@ -218,7 +218,7 @@ define <8 x double> @strict_vector_ffloor_v8f64(<8 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $9, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %f, metadata !"fpexcept.strict")
+  %res = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
   ret <8 x double> %res
 }
 
@@ -227,7 +227,7 @@ define <16 x float> @strict_vector_ftrunc_v16f32(<16 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict")
+  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
   ret <16 x float> %res
 }
 
@@ -236,7 +236,7 @@ define <8 x double> @strict_vector_ftrunc_v8f64(<8 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict")
+  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
   ret <8 x double> %res
 }
 
@@ -246,7 +246,7 @@ define <16 x float> @strict_vector_frint_v16f32(<16 x float> %f) #0 {
 ; CHECK-NEXT:    vrndscaleps $4, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float> %f,
-                             metadata !"round.dynamic", metadata !"fpexcept.strict")
+                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <16 x float> %res
 }
 
@@ -256,7 +256,7 @@ define <8 x double> @strict_vector_frint_v8f64(<8 x double> %f) #0 {
 ; CHECK-NEXT:    vrndscalepd $4, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double> %f,
-                            metadata !"round.dynamic", metadata !"fpexcept.strict")
+                            metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <8 x double> %res
 }
 
@@ -266,7 +266,7 @@ define <16 x float> @strict_vector_fnearbyint_v16f32(<16 x float> %f) #0 {
 ; CHECK-NEXT:    vrndscaleps $12, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float> %f,
-                             metadata !"round.dynamic", metadata !"fpexcept.strict")
+                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <16 x float> %res
 }
 
@@ -276,7 +276,7 @@ define <8 x double> @strict_vector_fnearbyint_v8f64(<8 x double> %f) #0 {
 ; CHECK-NEXT:    vrndscalepd $12, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %f,
-                             metadata !"round.dynamic", metadata !"fpexcept.strict")
+                             metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <8 x double> %res
 }
 

diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index b3a91e657c42..af9663d7798f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -208,7 +208,7 @@ define <2 x i64> @strict_vector_fptosi_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttpd2qq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
 }
 
@@ -526,7 +526,7 @@ define <2 x i64> @strict_vector_fptoui_v2f64_to_v2i64(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttpd2uqq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
 }
 
@@ -711,7 +711,7 @@ define <2 x i64> @strict_vector_fptosi_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2qq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
 }
 
@@ -1042,7 +1042,7 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2uqq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
 }
 
@@ -1082,7 +1082,7 @@ define <2 x i32> @strict_vector_fptosi_v2f64_to_v2i32(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttpd2dq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
 }
 
@@ -1191,7 +1191,7 @@ define <2 x i32> @strict_vector_fptoui_v2f64_to_v2i32(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttpd2udq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
 }
 
@@ -1238,7 +1238,7 @@ define <2 x i32> @strict_vector_fptosi_v2f32_to_v2i32(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2dq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
 }
 
@@ -1349,7 +1349,7 @@ define <2 x i32> @strict_vector_fptoui_v2f32_to_v2i32(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2udq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
 }
 
@@ -1396,7 +1396,7 @@ define <2 x i16> @strict_vector_fptosi_v2f64_to_v2i16(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
 }
 
@@ -1443,7 +1443,7 @@ define <2 x i16> @strict_vector_fptoui_v2f64_to_v2i16(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
 }
 
@@ -1497,7 +1497,7 @@ define <2 x i16> @strict_vector_fptosi_v2f32_to_v2i16(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
 }
 
@@ -1551,7 +1551,7 @@ define <2 x i16> @strict_vector_fptoui_v2f32_to_v2i16(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
 }
 
@@ -1602,7 +1602,7 @@ define <2 x i8> @strict_vector_fptosi_v2f64_to_v2i8(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
 }
 
@@ -1653,7 +1653,7 @@ define <2 x i8> @strict_vector_fptoui_v2f64_to_v2i8(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
 }
 
@@ -1711,7 +1711,7 @@ define <2 x i8> @strict_vector_fptosi_v2f32_to_v2i8(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
 }
 
@@ -1769,7 +1769,7 @@ define <2 x i8> @strict_vector_fptoui_v2f32_to_v2i8(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
 }
 
@@ -1894,7 +1894,7 @@ define <2 x i1> @strict_vector_fptosi_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2q %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
 }
 
@@ -2122,7 +2122,7 @@ define <2 x i1> @strict_vector_fptoui_v2f64_to_v2i1(<2 x double> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2q %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
 }
 
@@ -2270,7 +2270,7 @@ define <2 x i1> @strict_vector_fptosi_v2f32_to_v2i1(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2q %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
 }
 
@@ -2518,7 +2518,7 @@ define <2 x i1> @strict_vector_fptoui_v2f32_to_v2i1(<2 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2q %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
 }
 
@@ -2558,7 +2558,7 @@ define <4 x i32> @strict_vector_fptosi_v4f32_to_v4i32(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2dq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
 }
 
@@ -2630,7 +2630,7 @@ define <4 x i32> @strict_vector_fptoui_v4f32_to_v4i32(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vcvttps2udq %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
 }
 
@@ -2682,7 +2682,7 @@ define <4 x i8> @strict_vector_fptosi_v4f32_to_v4i8(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
 }
 
@@ -2734,7 +2734,7 @@ define <4 x i8> @strict_vector_fptoui_v4f32_to_v4i8(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovdb %xmm0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
 }
 
@@ -2787,7 +2787,7 @@ define <4 x i1> @strict_vector_fptosi_v4f32_to_v4i1(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2d %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
 }
 
@@ -2844,7 +2844,7 @@ define <4 x i1> @strict_vector_fptoui_v4f32_to_v4i1(<4 x float> %a) #0 {
 ; AVX512VLDQ-NEXT:    vpmovm2d %k0, %xmm0
 ; AVX512VLDQ-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
 }
 

diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
index 00aee49f64cb..52313174fed7 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
@@ -212,7 +212,7 @@ define <4 x i64> @strict_vector_fptosi_v4f64_to_v4i64(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vcvttpd2qq %ymm0, %ymm0
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
 }
 
@@ -595,7 +595,7 @@ define <4 x i64> @strict_vector_fptoui_v4f64_to_v4i64(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vcvttpd2uqq %ymm0, %ymm0
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
 }
 
@@ -774,7 +774,7 @@ define <4 x i64> @strict_vector_fptosi_v4f32_to_v4i64(<4 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vcvttps2qq %xmm0, %ymm0
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
 }
 
@@ -1157,7 +1157,7 @@ define <4 x i64> @strict_vector_fptoui_v4f32_to_v4i64(<4 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vcvttps2uqq %xmm0, %ymm0
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
 }
 
@@ -1168,7 +1168,7 @@ define <4 x i32> @strict_vector_fptosi_v4f64_to_v4i32(<4 x double> %a) #0 {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
 }
 
@@ -1218,7 +1218,7 @@ define <4 x i32> @strict_vector_fptoui_v4f64_to_v4i32(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
 }
 
@@ -1230,7 +1230,7 @@ define <4 x i16> @strict_vector_fptosi_v4f64_to_v4i16(<4 x double> %a) #0 {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i16> %ret
 }
 
@@ -1242,7 +1242,7 @@ define <4 x i16> @strict_vector_fptoui_v4f64_to_v4i16(<4 x double> %a) #0 {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i16> %ret
 }
 
@@ -1285,7 +1285,7 @@ define <4 x i8> @strict_vector_fptosi_v4f64_to_v4i8(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
 }
 
@@ -1328,7 +1328,7 @@ define <4 x i8> @strict_vector_fptoui_v4f64_to_v4i8(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
 }
 
@@ -1374,7 +1374,7 @@ define <4 x i1> @strict_vector_fptosi_v4f64_to_v4i1(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
 }
 
@@ -1424,7 +1424,7 @@ define <4 x i1> @strict_vector_fptoui_v4f64_to_v4i1(<4 x double> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
 }
 
@@ -1434,7 +1434,7 @@ define <8 x i32> @strict_vector_fptosi_v8f32_to_v8i32(<8 x float> %a) #0 {
 ; CHECK-NEXT:    vcvttps2dq %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
 }
 
@@ -1476,7 +1476,7 @@ define <8 x i32> @strict_vector_fptoui_v8f32_to_v8i32(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vcvttps2udq %ymm0, %ymm0
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
 }
 
@@ -1519,7 +1519,7 @@ define <8 x i16> @strict_vector_fptosi_v8f32_to_v8i16(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
 }
 
@@ -1562,7 +1562,7 @@ define <8 x i16> @strict_vector_fptoui_v8f32_to_v8i16(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
 }
 
@@ -1604,7 +1604,7 @@ define <8 x i8> @strict_vector_fptosi_v8f32_to_v8i8(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
 }
 
@@ -1646,7 +1646,7 @@ define <8 x i8> @strict_vector_fptoui_v8f32_to_v8i8(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
 }
 
@@ -1698,7 +1698,7 @@ define <8 x i1> @strict_vector_fptosi_v8f32_to_v8i1(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
 }
 
@@ -1754,7 +1754,7 @@ define <8 x i1> @strict_vector_fptoui_v8f32_to_v8i1(<8 x float> %a) #0 {
 ; AVX512DQVL-NEXT:    vzeroupper
 ; AVX512DQVL-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
 }
 

diff  --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
index 06464ea1cb81..52ba8deec1c4 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
@@ -129,7 +129,7 @@ define <8 x i64> @strict_vector_fptosi_v8f64_to_v8i64(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vcvttpd2qq %zmm0, %zmm0
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i64> %ret
 }
 
@@ -340,7 +340,7 @@ define <8 x i64> @strict_vector_fptoui_v8f64_to_v8i64(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vcvttpd2uqq %zmm0, %zmm0
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i64> %ret
 }
 
@@ -443,7 +443,7 @@ define <8 x i64> @strict_vector_fptosi_v8f32_to_v8i64(<8 x float> %a) #0 {
 ; AVX512DQ-NEXT:    vcvttps2qq %ymm0, %zmm0
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i64> %ret
 }
 
@@ -654,7 +654,7 @@ define <8 x i64> @strict_vector_fptoui_v8f32_to_v8i64(<8 x float> %a) #0 {
 ; AVX512DQ-NEXT:    vcvttps2uqq %ymm0, %zmm0
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i64> %ret
 }
 
@@ -664,7 +664,7 @@ define <8 x i32> @strict_vector_fptosi_v8f64_to_v8i32(<8 x double> %a) #0 {
 ; CHECK-NEXT:    vcvttpd2dq %zmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
 }
 
@@ -674,7 +674,7 @@ define <8 x i32> @strict_vector_fptoui_v8f64_to_v8i32(<8 x double> %a) #0 {
 ; CHECK-NEXT:    vcvttpd2udq %zmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
 }
 
@@ -694,7 +694,7 @@ define <8 x i16> @strict_vector_fptosi_v8f64_to_v8i16(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
 }
 
@@ -714,7 +714,7 @@ define <8 x i16> @strict_vector_fptoui_v8f64_to_v8i16(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
 }
 
@@ -733,7 +733,7 @@ define <8 x i8> @strict_vector_fptosi_v8f64_to_v8i8(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
 }
 
@@ -752,7 +752,7 @@ define <8 x i8> @strict_vector_fptoui_v8f64_to_v8i8(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
 }
 
@@ -777,7 +777,7 @@ define <8 x i1> @strict_vector_fptosi_v8f64_to_v8i1(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
 }
 
@@ -804,7 +804,7 @@ define <8 x i1> @strict_vector_fptoui_v8f64_to_v8i1(<8 x double> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
 }
 
@@ -814,7 +814,7 @@ define <16 x i32> @strict_vector_fptosi_v16f32_to_v16i32(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vcvttps2dq %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i32> %ret
 }
 
@@ -824,7 +824,7 @@ define <16 x i32> @strict_vector_fptoui_v16f32_to_v16i32(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vcvttps2udq %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i32> %ret
 }
 
@@ -835,7 +835,7 @@ define <16 x i16> @strict_vector_fptosi_v16f32_to_v16i16(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vpmovdw %zmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i16> %ret
 }
 
@@ -846,7 +846,7 @@ define <16 x i16> @strict_vector_fptoui_v16f32_to_v16i16(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vpmovdw %zmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i16> %ret
 }
 
@@ -858,7 +858,7 @@ define <16 x i8> @strict_vector_fptosi_v16f32_to_v16i8(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i8> %ret
 }
 
@@ -870,7 +870,7 @@ define <16 x i8> @strict_vector_fptoui_v16f32_to_v16i8(<16 x float> %a) #0 {
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i8> %ret
 }
 
@@ -893,7 +893,7 @@ define <16 x i1> @strict_vector_fptosi_v16f32_to_v16i1(<16 x float> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i1> %ret
 }
 
@@ -918,7 +918,7 @@ define <16 x i1> @strict_vector_fptoui_v16f32_to_v16i1(<16 x float> %a) #0 {
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    ret{{[l|q]}}
   %ret = call <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float> %a,
-                                              metadata !"fpexcept.strict")
+                                              metadata !"fpexcept.strict") #0
   ret <16 x i1> %ret
 }
 

diff  --git a/llvm/test/CodeGen/X86/vec-strict-round-128.ll b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
index 3f8b4f84a136..1f7507cc02bc 100644
--- a/llvm/test/CodeGen/X86/vec-strict-round-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
@@ -28,7 +28,7 @@ define <4 x float> @fceilv4f32(<4 x float> %f) #0 {
 ; AVX-NEXT:    vroundps $10, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
-                          <4 x float> %f, metadata !"fpexcept.strict")
+                          <4 x float> %f, metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
@@ -43,7 +43,7 @@ define <2 x double> @fceilv2f64(<2 x double> %f) #0 {
 ; AVX-NEXT:    vroundpd $10, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
-                        <2 x double> %f, metadata !"fpexcept.strict")
+                        <2 x double> %f, metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
@@ -58,7 +58,7 @@ define <4 x float> @ffloorv4f32(<4 x float> %f) #0 {
 ; AVX-NEXT:    vroundps $9, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
-                          <4 x float> %f, metadata !"fpexcept.strict")
+                          <4 x float> %f, metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
@@ -73,7 +73,7 @@ define <2 x double> @ffloorv2f64(<2 x double> %f) #0 {
 ; AVX-NEXT:    vroundpd $9, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
-                        <2 x double> %f, metadata !"fpexcept.strict")
+                        <2 x double> %f, metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
@@ -88,7 +88,7 @@ define <4 x float> @ftruncv4f32(<4 x float> %f) #0 {
 ; AVX-NEXT:    vroundps $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                          <4 x float> %f, metadata !"fpexcept.strict")
+                          <4 x float> %f, metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
@@ -103,7 +103,7 @@ define <2 x double> @ftruncv2f64(<2 x double> %f) #0 {
 ; AVX-NEXT:    vroundpd $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %f, metadata !"fpexcept.strict")
+                        <2 x double> %f, metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
@@ -119,7 +119,7 @@ define <4 x float> @frintv4f32(<4 x float> %f) #0 {
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.experimental.constrained.rint.v4f32(
                           <4 x float> %f,
-                          metadata !"round.dynamic", metadata !"fpexcept.strict")
+                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
@@ -135,7 +135,7 @@ define <2 x double> @frintv2f64(<2 x double> %f) #0 {
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
                         <2 x double> %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 
@@ -151,7 +151,7 @@ define <4 x float> @fnearbyintv4f32(<4 x float> %f) #0 {
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
                           <4 x float> %f,
-                          metadata !"round.dynamic", metadata !"fpexcept.strict")
+                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <4 x float> %res
 }
 
@@ -167,7 +167,7 @@ define <2 x double> @fnearbyintv2f64(<2 x double> %f) #0 {
 ; AVX-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
                         <2 x double> %f,
-                        metadata !"round.dynamic", metadata !"fpexcept.strict")
+                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret <2 x double> %res
 }
 

diff  --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
index b1ef02c855a1..4dbb6d888feb 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
@@ -54,6 +54,8 @@ entry:
   ret <4 x double> %add
 }
 
+attributes #0 = { strictfp }
+
 declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)

diff  --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index f1436324c2ca..85d5f580974c 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -222,7 +222,7 @@ define <4 x float> @load_cvt_4i16_to_4f32_constrained(<4 x i16>* %a0) nounwind s
   ret <4 x float> %3
 }
 
-define <4 x float> @load_cvt_8i16_to_4f32_constrained(<8 x i16>* %a0) nounwind {
+define <4 x float> @load_cvt_8i16_to_4f32_constrained(<8 x i16>* %a0) nounwind strictfp {
 ; ALL-LABEL: load_cvt_8i16_to_4f32_constrained:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtph2ps (%rdi), %xmm0


        


More information about the llvm-commits mailing list