[llvm] f57fb82 - [FPEnv][AArch64] Correct strictfp tests.

Kevin P. Neal via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 26 06:14:39 PDT 2023


Author: Kevin P. Neal
Date: 2023-07-26T09:14:25-04:00
New Revision: f57fb82e0f90b4c667a0d044c14d9deaa22d3a99

URL: https://github.com/llvm/llvm-project/commit/f57fb82e0f90b4c667a0d044c14d9deaa22d3a99
DIFF: https://github.com/llvm/llvm-project/commit/f57fb82e0f90b4c667a0d044c14d9deaa22d3a99.diff

LOG: [FPEnv][AArch64] Correct strictfp tests.

Correct AArch64 strictfp tests to follow the rules documented in the LangRef:
https://llvm.org/docs/LangRef.html#constrained-floating-point-intrinsics

Mostly these tests just needed the strictfp attribute on function
definitions.  I've also removed the strictfp attribute from uses
of the constrained intrinsics because it comes by default since
D154991, but I only did this in tests I was changing anyway.

I have removed attributes added to declare lines of intrinsics. The
attributes of intrinsics cannot be changed in a test so I eliminated
attempts to do so.

Test changes verified with D146845.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
    llvm/test/CodeGen/AArch64/arm64-fmadd.ll
    llvm/test/CodeGen/AArch64/arm64-vmul.ll
    llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
    llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
    llvm/test/CodeGen/AArch64/strict-fp-opt.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
index 4c2db4ae913ee1..968acb2565b4e4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-constrained-fcmp-no-nans-opt.ll
@@ -7,7 +7,7 @@ declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, met
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, eq
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -16,7 +16,7 @@ define i1 @f32_constrained_fcmp_ueq(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, ne
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -25,7 +25,7 @@ define i1 @f32_constrained_fcmp_une(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, gt
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -34,7 +34,7 @@ define i1 @f32_constrained_fcmp_ugt(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, ge
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -43,7 +43,7 @@ define i1 @f32_constrained_fcmp_uge(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, lt
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -52,7 +52,7 @@ define i1 @f32_constrained_fcmp_ult(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp s0, s1
 ; CHECK-NEXT: cset w0, le
 ; CHECK-NEXT: ret
-define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp {
+define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -61,7 +61,7 @@ define i1 @f32_constrained_fcmp_ule(float %a, float %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, eq
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -70,7 +70,7 @@ define i1 @f64_constrained_fcmp_ueq(double %a, double %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, ne
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -79,7 +79,7 @@ define i1 @f64_constrained_fcmp_une(double %a, double %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, gt
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -88,7 +88,7 @@ define i1 @f64_constrained_fcmp_ugt(double %a, double %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, ge
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -97,7 +97,7 @@ define i1 @f64_constrained_fcmp_uge(double %a, double %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, lt
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict")
   ret i1 %cmp
 }
@@ -106,7 +106,7 @@ define i1 @f64_constrained_fcmp_ult(double %a, double %b) nounwind ssp {
 ; CHECK: fcmp d0, d1
 ; CHECK-NEXT: cset w0, le
 ; CHECK-NEXT: ret
-define i1 @f64_constrained_fcmp_ule(double %a, double %b) nounwind ssp {
+define i1 @f64_constrained_fcmp_ule(double %a, double %b) nounwind ssp strictfp {
   %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict")
   ret i1 %cmp
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-fmadd.ll b/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
index d7cdb835cd3c31..b1dde3fefa9165 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fmadd.ll
@@ -109,7 +109,7 @@ entry:
   ret double %0
 }
 
-define float @fma32_strict(float %a, float %b, float %c) nounwind readnone ssp {
+define float @fma32_strict(float %a, float %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fma32_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmadd s0, s0, s1, s2
@@ -119,7 +119,7 @@ entry:
   ret float %0
 }
 
-define float @fnma32_strict(float %a, float %b, float %c) nounwind readnone ssp {
+define float @fnma32_strict(float %a, float %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fnma32_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fnmadd s0, s0, s1, s2
@@ -130,7 +130,7 @@ entry:
   ret float %neg
 }
 
-define float @fms32_strict(float %a, float %b, float %c) nounwind readnone ssp {
+define float @fms32_strict(float %a, float %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fms32_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmsub s0, s0, s1, s2
@@ -141,7 +141,7 @@ entry:
   ret float %0
 }
 
-define float @fms32_com_strict(float %a, float %b, float %c) nounwind readnone ssp {
+define float @fms32_com_strict(float %a, float %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fms32_com_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmsub s0, s0, s1, s2
@@ -152,7 +152,7 @@ entry:
   ret float %0
 }
 
-define float @fnms32_strict(float %a, float %b, float %c) nounwind readnone ssp {
+define float @fnms32_strict(float %a, float %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fnms32_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fnmsub s0, s0, s1, s2
@@ -163,7 +163,7 @@ entry:
   ret float %0
 }
 
-define double @fma64_strict(double %a, double %b, double %c) nounwind readnone ssp {
+define double @fma64_strict(double %a, double %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fma64_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmadd d0, d0, d1, d2
@@ -173,7 +173,7 @@ entry:
   ret double %0
 }
 
-define double @fnma64_strict(double %a, double %b, double %c) nounwind readnone ssp {
+define double @fnma64_strict(double %a, double %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fnma64_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fnmadd d0, d0, d1, d2
@@ -184,7 +184,7 @@ entry:
   ret double %neg
 }
 
-define double @fms64_strict(double %a, double %b, double %c) nounwind readnone ssp {
+define double @fms64_strict(double %a, double %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fms64_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmsub d0, d0, d1, d2
@@ -195,7 +195,7 @@ entry:
   ret double %0
 }
 
-define double @fms64_com_strict(double %a, double %b, double %c) nounwind readnone ssp {
+define double @fms64_com_strict(double %a, double %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fms64_com_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmsub d0, d0, d1, d2
@@ -206,7 +206,7 @@ entry:
   ret double %0
 }
 
-define double @fnms64_strict(double %a, double %b, double %c) nounwind readnone ssp {
+define double @fnms64_strict(double %a, double %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fnms64_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fnmsub d0, d0, d1, d2

diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 3a9f0319b06e0f..1f71d9fdfc4ea3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -993,7 +993,7 @@ entry:
   ret <2 x double> %fmla1
 }
 
-define <2 x float> @fmls_indexed_2s_strict(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp {
+define <2 x float> @fmls_indexed_2s_strict(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmls_indexed_2s_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
@@ -1006,7 +1006,7 @@ entry:
   ret <2 x float> %fmls1
 }
 
-define <4 x float> @fmls_indexed_4s_strict(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp {
+define <4 x float> @fmls_indexed_4s_strict(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmls_indexed_4s_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmls.4s v0, v2, v1[0]
@@ -1018,7 +1018,7 @@ entry:
   ret <4 x float> %fmls1
 }
 
-define <2 x double> @fmls_indexed_2d_strict(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp {
+define <2 x double> @fmls_indexed_2d_strict(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmls_indexed_2d_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmls.2d v0, v2, v1[0]
@@ -1030,7 +1030,7 @@ entry:
   ret <2 x double> %fmls1
 }
 
-define <2 x float> @fmla_indexed_scalar_2s_strict(<2 x float> %a, <2 x float> %b, float %c) nounwind readnone ssp {
+define <2 x float> @fmla_indexed_scalar_2s_strict(<2 x float> %a, <2 x float> %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmla_indexed_scalar_2s_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $s2 killed $s2 def $q2
@@ -1043,7 +1043,7 @@ entry:
   ret <2 x float> %fmla1
 }
 
-define <4 x float> @fmla_indexed_scalar_4s_strict(<4 x float> %a, <4 x float> %b, float %c) nounwind readnone ssp {
+define <4 x float> @fmla_indexed_scalar_4s_strict(<4 x float> %a, <4 x float> %b, float %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmla_indexed_scalar_4s_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $s2 killed $s2 def $q2
@@ -1058,7 +1058,7 @@ entry:
   ret <4 x float> %fmla1
 }
 
-define <2 x double> @fmla_indexed_scalar_2d_strict(<2 x double> %a, <2 x double> %b, double %c) nounwind readnone ssp {
+define <2 x double> @fmla_indexed_scalar_2d_strict(<2 x double> %a, <2 x double> %b, double %c) nounwind readnone ssp strictfp {
 ; CHECK-LABEL: fmla_indexed_scalar_2d_strict:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2

diff --git a/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll b/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
index caf012a9a699fe..097e18b8104111 100644
--- a/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
+++ b/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
@@ -15,7 +15,7 @@ define <1 x double> @fpext_v1f16_v1f64(ptr %a) {
   ret <1 x double> %c
 }
 
-define <1 x double> @strict_fpext_v1f32_v1f64(<1 x half> %x) #0 {
+define <1 x double> @strict_fpext_v1f32_v1f64(<1 x half> %x) strictfp {
 ; CHECK-LABEL: strict_fpext_v1f32_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvt d0, h0

diff --git a/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
index 5ae08cf20c392c..14b6e4b383adfc 100644
--- a/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
+++ b/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -110,105 +110,105 @@ define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
   ret double %tmp3
 }
 
-define float @test_fmla_ss4S_strict(float %a, float %b, <4 x float> %v) {
+define float @test_fmla_ss4S_strict(float %a, float %b, <4 x float> %v) #0 {
   ; CHECK-LABEL: test_fmla_ss4S_strict
   ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
   %tmp1 = extractelement <4 x float> %v, i32 3
-  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
-define float @test_fmla_ss4S_swap_strict(float %a, float %b, <4 x float> %v) {
+define float @test_fmla_ss4S_swap_strict(float %a, float %b, <4 x float> %v) #0 {
   ; CHECK-LABEL: test_fmla_ss4S_swap_strict
   ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
   %tmp1 = extractelement <4 x float> %v, i32 3
-  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
-define float @test_fmla_ss2S_strict(float %a, float %b, <2 x float> %v) {
+define float @test_fmla_ss2S_strict(float %a, float %b, <2 x float> %v) #0 {
   ; CHECK-LABEL: test_fmla_ss2S_strict
   ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
   %tmp1 = extractelement <2 x float> %v, i32 1
-  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
-define double @test_fmla_ddD_strict(double %a, double %b, <1 x double> %v) {
+define double @test_fmla_ddD_strict(double %a, double %b, <1 x double> %v) #0 {
   ; CHECK-LABEL: test_fmla_ddD_strict
   ; CHECK: {{fmla d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmadd d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
   %tmp1 = extractelement <1 x double> %v, i32 0
-  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
-define double @test_fmla_dd2D_strict(double %a, double %b, <2 x double> %v) {
+define double @test_fmla_dd2D_strict(double %a, double %b, <2 x double> %v) #0 {
   ; CHECK-LABEL: test_fmla_dd2D_strict
   ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
   %tmp1 = extractelement <2 x double> %v, i32 1
-  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
-define double @test_fmla_dd2D_swap_strict(double %a, double %b, <2 x double> %v) {
+define double @test_fmla_dd2D_swap_strict(double %a, double %b, <2 x double> %v) #0 {
   ; CHECK-LABEL: test_fmla_dd2D_swap_strict
   ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
   %tmp1 = extractelement <2 x double> %v, i32 1
-  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %b, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %b, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
-define float @test_fmls_ss4S_strict(float %a, float %b, <4 x float> %v) {
+define float @test_fmls_ss4S_strict(float %a, float %b, <4 x float> %v) #0 {
   ; CHECK-LABEL: test_fmls_ss4S_strict
   ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fneg float %tmp1
-  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp3
 }
 
-define float @test_fmls_ss4S_swap_strict(float %a, float %b, <4 x float> %v) {
+define float @test_fmls_ss4S_swap_strict(float %a, float %b, <4 x float> %v) #0 {
   ; CHECK-LABEL: test_fmls_ss4S_swap_strict
   ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fneg float %tmp1
-  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %tmp2, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %tmp2, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp3
 }
 
-define float @test_fmls_ss2S_strict(float %a, float %b, <2 x float> %v) {
+define float @test_fmls_ss2S_strict(float %a, float %b, <2 x float> %v) #0 {
   ; CHECK-LABEL: test_fmls_ss2S_strict
   ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
   %tmp1 = extractelement <2 x float> %v, i32 1
   %tmp2 = fneg float %tmp1
-  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp3
 }
 
-define double @test_fmls_ddD_strict(double %a, double %b, <1 x double> %v) {
+define double @test_fmls_ddD_strict(double %a, double %b, <1 x double> %v) #0 {
   ; CHECK-LABEL: test_fmls_ddD_strict
   ; CHECK: {{fmls d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmsub d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
   %tmp1 = extractelement <1 x double> %v, i32 0
   %tmp2 = fneg double %tmp1
-  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp2, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp2, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp3
 }
 
-define double @test_fmls_dd2D_strict(double %a, double %b, <2 x double> %v) {
+define double @test_fmls_dd2D_strict(double %a, double %b, <2 x double> %v) #0 {
   ; CHECK-LABEL: test_fmls_dd2D_strict
   ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fneg double %tmp1
-  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp2, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp2, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp3
 }
 
-define double @test_fmls_dd2D_swap_strict(double %a, double %b, <2 x double> %v) {
+define double @test_fmls_dd2D_swap_strict(double %a, double %b, <2 x double> %v) #0 {
   ; CHECK-LABEL: test_fmls_dd2D_swap_strict
   ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fneg double %tmp1
-  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %tmp2, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %tmp2, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp3
 }
 

diff --git a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
index f370e8f05761cb..bb7cd22c01b418 100644
--- a/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
+++ b/llvm/test/CodeGen/AArch64/strict-fp-opt.ll
@@ -7,7 +7,7 @@
 ; CHECK-LABEL: unused_div:
 ; CHECK-NOT: fdiv
 ; CHECK: ret
-define void @unused_div(float %x, float %y) #0 {
+define void @unused_div(float %x, float %y) {
 entry:
   %add = fdiv float %x, %y
   ret void
@@ -40,7 +40,7 @@ entry:
 ; CHECK-NEXT: fmul [[MUL:s[0-9]+]], [[ADD]], [[ADD]]
 ; CHECK-NEXT: fcsel s0, [[ADD]], [[MUL]], eq
 ; CHECK-NEXT: ret
-define float @add_twice(float %x, float %y, i32 %n) #0 {
+define float @add_twice(float %x, float %y, i32 %n) {
 entry:
   %add = fadd float %x, %y
   %tobool.not = icmp eq i32 %n, 0
@@ -118,7 +118,7 @@ if.end:
 ; CHECK-NEXT: msr FPCR, [[XREG4]]
 ; CHECK-NEXT: fsub s0, [[SREG]], [[SREG]]
 ; CHECK-NEXT: ret
-define float @set_rounding(float %x, float %y) #0 {
+define float @set_rounding(float %x, float %y) {
 entry:
   %add1 = fadd float %x, %y
   call void @llvm.set.rounding(i32 0)
@@ -142,9 +142,9 @@ entry:
 define float @set_rounding_fpexcept_strict(float %x, float %y) #0 {
 entry:
   %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
-  call void @llvm.set.rounding(i32 0)
+  call void @llvm.set.rounding(i32 0) #0
   %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
-  call void @llvm.set.rounding(i32 1)
+  call void @llvm.set.rounding(i32 1) #0
   %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %sub
 }
@@ -163,17 +163,17 @@ entry:
 define float @set_rounding_round_dynamic(float %x, float %y) #0 {
 entry:
   %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  call void @llvm.set.rounding(i32 0)
+  call void @llvm.set.rounding(i32 0) #0
   %add2 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  call void @llvm.set.rounding(i32 1)
+  call void @llvm.set.rounding(i32 1) #0
   %sub = call float @llvm.experimental.constrained.fsub.f32(float %add1, float %add2, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
   ret float %sub
 }
 
-declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) #0
-declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) #0
-declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) #0
-declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) #0
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
+declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
 declare i32 @llvm.get.rounding()
 declare void @llvm.set.rounding(i32)
 


        


More information about the llvm-commits mailing list