[llvm] 7e0e8b7 - [FPEnv][PowerPC] Correct strictfp tests.

Kevin P. Neal via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 26 06:13:01 PDT 2023


Author: Kevin P. Neal
Date: 2023-07-26T09:12:29-04:00
New Revision: 7e0e8b7acee4be6d7fdeffc3c14738ab1fc962cd

URL: https://github.com/llvm/llvm-project/commit/7e0e8b7acee4be6d7fdeffc3c14738ab1fc962cd
DIFF: https://github.com/llvm/llvm-project/commit/7e0e8b7acee4be6d7fdeffc3c14738ab1fc962cd.diff

LOG: [FPEnv][PowerPC] Correct strictfp tests.

Correct PowerPC strictfp tests to follow the rules documented in the LangRef:
https://llvm.org/docs/LangRef.html#constrained-floating-point-intrinsics

Mostly these tests just needed the strictfp attribute on function
definitions. I've also removed the strictfp attribute from uses of
the constrained intrinsics because it has been implied by default
since D154991, though I only did this in tests I was changing anyway.

I have also removed attributes that had been added to the declare
lines of intrinsics: the attributes of intrinsics cannot be changed
in a test, so such attempts have no effect and I eliminated them.

Test changes verified with D146845.
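
As a minimal sketch of the corrected pattern (the function name and
the particular intrinsic here are chosen for illustration, not taken
from the tests): the function definition carries strictfp itself, the
call site needs no explicit strictfp since D154991, and the declare
line stays bare:

  define double @sqrt_f64(double %d) strictfp {
  entry:
    ; strictfp on this call is implied since D154991
    %res = call double @llvm.experimental.constrained.sqrt.f64(double %d, metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret double %res
  }

  ; Intrinsic attributes cannot be changed in a test, so no
  ; attributes belong on the declare line.
  declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)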

Added: 
    

Modified: 
    llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
    llvm/test/CodeGen/PowerPC/fp-strict-round.ll
    llvm/test/CodeGen/PowerPC/is_fpclass.ll
    llvm/test/CodeGen/PowerPC/nofpexcept.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
index 20a2aac2f77403..8defbeaf1bcb02 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
@@ -46,7 +46,7 @@ define i32 @d_to_i32(double %m) #0 {
 ; NOVSX-NEXT:    lwz r3, -4(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
@@ -64,7 +64,7 @@ define i64 @d_to_i64(double %m) #0 {
 ; NOVSX-NEXT:    ld r3, -8(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -82,7 +82,7 @@ define i64 @d_to_u64(double %m) #0 {
 ; NOVSX-NEXT:    ld r3, -8(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -101,7 +101,7 @@ define zeroext i32 @d_to_u32(double %m) #0 {
 ; NOVSX-NEXT:    lwz r3, -4(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
@@ -121,7 +121,7 @@ define signext i32 @f_to_i32(float %m) #0 {
 ; NOVSX-NEXT:    lwa r3, -4(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
@@ -139,7 +139,7 @@ define i64 @f_to_i64(float %m) #0 {
 ; NOVSX-NEXT:    ld r3, -8(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -157,7 +157,7 @@ define i64 @f_to_u64(float %m) #0 {
 ; NOVSX-NEXT:    ld r3, -8(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -176,7 +176,7 @@ define zeroext i32 @f_to_u32(float %m) #0 {
 ; NOVSX-NEXT:    lwz r3, -4(r1)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
@@ -195,7 +195,7 @@ define double @i32_to_d(i32 signext %m) #0 {
 ; NOVSX-NEXT:    fcfid f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -213,7 +213,7 @@ define double @i64_to_d(i64 %m) #0 {
 ; NOVSX-NEXT:    fcfid f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -232,7 +232,7 @@ define double @u32_to_d(i32 zeroext %m) #0 {
 ; NOVSX-NEXT:    fcfidu f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -250,7 +250,7 @@ define double @u64_to_d(i64 %m) #0 {
 ; NOVSX-NEXT:    fcfidu f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -269,7 +269,7 @@ define float @i32_to_f(i32 signext %m) #0 {
 ; NOVSX-NEXT:    fcfids f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -287,7 +287,7 @@ define float @i64_to_f(i64 %m) #0 {
 ; NOVSX-NEXT:    fcfids f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -306,7 +306,7 @@ define float @u32_to_f(i32 zeroext %m) #0 {
 ; NOVSX-NEXT:    fcfidus f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -324,7 +324,7 @@ define float @u64_to_f(i64 %m) #0 {
 ; NOVSX-NEXT:    fcfidus f1, f0
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -344,7 +344,7 @@ define void @d_to_i32_store(double %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    stw r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
   store i32 %conv, ptr %addr, align 4
   ret void
 }
@@ -370,7 +370,7 @@ define void @d_to_i64_store(double %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    std r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
   store i64 %conv, ptr %addr, align 8
   ret void
 }
@@ -396,7 +396,7 @@ define void @d_to_u64_store(double %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    std r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
   store i64 %conv, ptr %addr, align 8
   ret void
 }
@@ -417,7 +417,7 @@ define void @d_to_u32_store(double %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    stw r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
   store i32 %conv, ptr %addr, align 4
   ret void
 }
@@ -438,7 +438,7 @@ define void @f_to_i32_store(float %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    stw r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
   store i32 %conv, ptr %addr, align 4
   ret void
 }
@@ -464,7 +464,7 @@ define void @f_to_i64_store(float %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    std r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
   store i64 %conv, ptr %addr, align 8
   ret void
 }
@@ -490,7 +490,7 @@ define void @f_to_u64_store(float %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    std r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
   store i64 %conv, ptr %addr, align 8
   ret void
 }
@@ -511,7 +511,7 @@ define void @f_to_u32_store(float %m, ptr %addr) #0 {
 ; NOVSX-NEXT:    stw r3, 0(r4)
 ; NOVSX-NEXT:    blr
 entry:
-  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
   store i32 %conv, ptr %addr, align 4
   ret void
 }
@@ -530,7 +530,7 @@ define double @load_i32_to_d(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i32, ptr %addr, align 4
-  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -548,7 +548,7 @@ define double @load_i64_to_d(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i64, ptr %addr, align 8
-  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -566,7 +566,7 @@ define double @load_u32_to_d(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i32, ptr %addr, align 4
-  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -584,7 +584,7 @@ define double @load_u64_to_d(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i64, ptr %addr, align 8
-  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %conv
 }
 
@@ -602,7 +602,7 @@ define float @load_i32_to_f(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i32, ptr %addr, align 4
-  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -620,7 +620,7 @@ define float @load_i64_to_f(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i64, ptr %addr, align 8
-  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -638,7 +638,7 @@ define float @load_u32_to_f(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i32, ptr %addr, align 4
-  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
@@ -656,21 +656,21 @@ define float @load_u64_to_f(ptr %addr) #0 {
 ; NOVSX-NEXT:    blr
 entry:
   %m = load i64, ptr %addr, align 8
-  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %conv
 }
 
-define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) {
+define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) #0 {
 ; MIR-LABEL: name: fptoint_nofpexcept_f64
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
 ; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
 ; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
 entry:
-  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore") #0
-  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore") #0
-  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore") #0
-  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore") #0
+  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore")
+  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore")
+  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore")
+  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore")
   store volatile i32 %conv1, ptr %addr1, align 4
   store volatile i32 %conv2, ptr %addr1, align 4
   store volatile i64 %conv3, ptr %addr2, align 8
@@ -678,17 +678,17 @@ entry:
   ret void
 }
 
-define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) {
+define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) #0 {
 ; MIR-LABEL: name: fptoint_nofpexcept_f32
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
 ; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
 ; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
 entry:
-  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore") #0
-  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore") #0
-  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore") #0
-  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore") #0
+  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore")
+  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore")
+  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore")
+  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore")
   store volatile i32 %conv1, ptr %addr1, align 4
   store volatile i32 %conv2, ptr %addr1, align 4
   store volatile i64 %conv3, ptr %addr2, align 8
@@ -696,17 +696,17 @@ entry:
   ret void
 }
 
-define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) {
+define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) #0 {
 ; MIR-LABEL: name: inttofp_nofpexcept_i32
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
 entry:
-  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
   store volatile float  %conv1, ptr  %addr1, align 4
   store volatile float  %conv2, ptr  %addr1, align 4
   store volatile double %conv3, ptr %addr2, align 8
@@ -714,17 +714,17 @@ entry:
   ret void
 }
 
-define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) {
+define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) #0 {
 ; MIR-LABEL: name: inttofp_nofpexcept_i64
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
 entry:
-  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
-  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
   store volatile float  %conv1, ptr  %addr1, align 4
   store volatile float  %conv2, ptr  %addr1, align 4
   store volatile double %conv3, ptr %addr2, align 8
@@ -732,11 +732,11 @@ entry:
   ret void
 }
 
-define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) {
+define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) #0 {
 ; MIR-LABEL: name: inttofp_nofpexcept_vec
 ; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
 entry:
-  %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+  %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
   ret <2 x double> %conv
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
index 4de7d1a05f50e5..6c2a718e65b3f8 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -39,7 +39,7 @@ declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
-define float @ceil_f32(float %f1) {
+define float @ceil_f32(float %f1) strictfp {
 ; P8-LABEL: ceil_f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpip f1, f1
@@ -55,7 +55,7 @@ define float @ceil_f32(float %f1) {
   ret float %res
 }
 
-define double @ceil_f64(double %f1) {
+define double @ceil_f64(double %f1) strictfp {
 ; P8-LABEL: ceil_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpip f1, f1
@@ -71,7 +71,7 @@ define double @ceil_f64(double %f1) {
   ret double %res
 }
 
-define <4 x float> @ceil_v4f32(<4 x float> %vf1) {
+define <4 x float> @ceil_v4f32(<4 x float> %vf1) strictfp {
 ; P8-LABEL: ceil_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrspip v2, v2
@@ -87,7 +87,7 @@ define <4 x float> @ceil_v4f32(<4 x float> %vf1) {
   ret <4 x float> %res
 }
 
-define <2 x double> @ceil_v2f64(<2 x double> %vf1) {
+define <2 x double> @ceil_v2f64(<2 x double> %vf1) strictfp {
 ; P8-LABEL: ceil_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrdpip v2, v2
@@ -103,7 +103,7 @@ define <2 x double> @ceil_v2f64(<2 x double> %vf1) {
   ret <2 x double> %res
 }
 
-define float @floor_f32(float %f1) {
+define float @floor_f32(float %f1) strictfp {
 ; P8-LABEL: floor_f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpim f1, f1
@@ -119,7 +119,7 @@ define float @floor_f32(float %f1) {
   ret float %res
 }
 
-define double @floor_f64(double %f1) {
+define double @floor_f64(double %f1) strictfp {
 ; P8-LABEL: floor_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpim f1, f1
@@ -135,7 +135,7 @@ define double @floor_f64(double %f1) {
   ret double %res;
 }
 
-define <4 x float> @floor_v4f32(<4 x float> %vf1) {
+define <4 x float> @floor_v4f32(<4 x float> %vf1) strictfp {
 ; P8-LABEL: floor_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrspim v2, v2
@@ -151,7 +151,7 @@ define <4 x float> @floor_v4f32(<4 x float> %vf1) {
   ret <4 x float> %res;
 }
 
-define <2 x double> @floor_v2f64(<2 x double> %vf1) {
+define <2 x double> @floor_v2f64(<2 x double> %vf1) strictfp {
 ; P8-LABEL: floor_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrdpim v2, v2
@@ -167,7 +167,7 @@ define <2 x double> @floor_v2f64(<2 x double> %vf1) {
   ret <2 x double> %res;
 }
 
-define double @nearbyint_f64(double %f1, double %f2) {
+define double @nearbyint_f64(double %f1, double %f2) strictfp {
 ; P8-LABEL: nearbyint_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    mflr r0
@@ -202,7 +202,7 @@ define double @nearbyint_f64(double %f1, double %f2) {
   ret double %res
 }
 
-define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp {
 ; P8-LABEL: nearbyint_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    mflr r0
@@ -307,7 +307,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
   ret <4 x float> %res
 }
 
-define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) strictfp {
 ; P8-LABEL: nearbyint_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    mflr r0
@@ -376,7 +376,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
   ret <2 x double> %res
 }
 
-define <4 x double> @fpext_v4f64_v4f32(<4 x float> %vf1) {
+define <4 x double> @fpext_v4f64_v4f32(<4 x float> %vf1) strictfp {
 ; P8-LABEL: fpext_v4f64_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xxsldwi vs0, v2, v2, 1
@@ -409,7 +409,7 @@ define <4 x double> @fpext_v4f64_v4f32(<4 x float> %vf1) {
   ret <4 x double> %res
 }
 
-define <2 x double> @fpext_v2f64_v2f32(<2 x float> %vf1) {
+define <2 x double> @fpext_v2f64_v2f32(<2 x float> %vf1) strictfp {
 ; P8-LABEL: fpext_v2f64_v2f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xxsldwi vs0, v2, v2, 1
@@ -432,7 +432,7 @@ define <2 x double> @fpext_v2f64_v2f32(<2 x float> %vf1) {
   ret <2 x double> %res
 }
 
-define float @fptrunc_f32_f64(double %f1) {
+define float @fptrunc_f32_f64(double %f1) strictfp {
 ; P8-LABEL: fptrunc_f32_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrsp f1, f1
@@ -449,7 +449,7 @@ define float @fptrunc_f32_f64(double %f1) {
   ret float %res;
 }
 
-define <4 x float> @fptrunc_v4f32_v4f64(<4 x double> %vf1) {
+define <4 x float> @fptrunc_v4f32_v4f64(<4 x double> %vf1) strictfp {
 ; P8-LABEL: fptrunc_v4f32_v4f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xxmrgld vs0, v2, v3
@@ -474,7 +474,7 @@ define <4 x float> @fptrunc_v4f32_v4f64(<4 x double> %vf1) {
   ret <4 x float> %res
 }
 
-define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %vf1) {
+define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %vf1) strictfp {
 ; P8-LABEL: fptrunc_v2f32_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xxswapd vs0, v2
@@ -501,7 +501,7 @@ define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %vf1) {
   ret <2 x float> %res
 }
 
-define float @round_f32(float %f1) {
+define float @round_f32(float %f1) strictfp {
 ; P8-LABEL: round_f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpi f1, f1
@@ -517,7 +517,7 @@ define float @round_f32(float %f1) {
   ret float %res
 }
 
-define double @round_f64(double %f1) {
+define double @round_f64(double %f1) strictfp {
 ; P8-LABEL: round_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpi f1, f1
@@ -533,7 +533,7 @@ define double @round_f64(double %f1) {
   ret double %res
 }
 
-define <4 x float> @round_v4f32(<4 x float> %vf1) {
+define <4 x float> @round_v4f32(<4 x float> %vf1) strictfp {
 ; P8-LABEL: round_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrspi v2, v2
@@ -549,7 +549,7 @@ define <4 x float> @round_v4f32(<4 x float> %vf1) {
   ret <4 x float> %res
 }
 
-define <2 x double> @round_v2f64(<2 x double> %vf1) {
+define <2 x double> @round_v2f64(<2 x double> %vf1) strictfp {
 ; P8-LABEL: round_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrdpi v2, v2
@@ -565,7 +565,7 @@ define <2 x double> @round_v2f64(<2 x double> %vf1) {
   ret <2 x double> %res
 }
 
-define float @trunc_f32(float %f1) {
+define float @trunc_f32(float %f1) strictfp {
 ; P8-LABEL: trunc_f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpiz f1, f1
@@ -581,7 +581,7 @@ define float @trunc_f32(float %f1) {
   ret float %res
 }
 
-define double @trunc_f64(double %f1) {
+define double @trunc_f64(double %f1) strictfp {
 ; P8-LABEL: trunc_f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xsrdpiz f1, f1
@@ -597,7 +597,7 @@ define double @trunc_f64(double %f1) {
   ret double %res
 }
 
-define <4 x float> @trunc_v4f32(<4 x float> %vf1) {
+define <4 x float> @trunc_v4f32(<4 x float> %vf1) strictfp {
 ; P8-LABEL: trunc_v4f32:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrspiz v2, v2
@@ -613,7 +613,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %vf1) {
   ret <4 x float> %res
 }
 
-define <2 x double> @trunc_v2f64(<2 x double> %vf1) {
+define <2 x double> @trunc_v2f64(<2 x double> %vf1) strictfp {
 ; P8-LABEL: trunc_v2f64:
 ; P8:       # %bb.0:
 ; P8-NEXT:    xvrdpiz v2, v2

diff --git a/llvm/test/CodeGen/PowerPC/is_fpclass.ll b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
index baf69dd787b250..1ba4749d7437df 100644
--- a/llvm/test/CodeGen/PowerPC/is_fpclass.ll
+++ b/llvm/test/CodeGen/PowerPC/is_fpclass.ll
@@ -58,7 +58,7 @@ define i1 @isnan_float_strictfp(float %x) strictfp nounwind {
 ; CHECK-NEXT:    li 4, 1
 ; CHECK-NEXT:    iseleq 3, 4, 3
 ; CHECK-NEXT:    blr
-  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3)  ; nan
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3) strictfp ; nan
   ret i1 %1
 }
 
@@ -70,7 +70,7 @@ define i1 @isnan_double_strictfp(double %x) strictfp nounwind {
 ; CHECK-NEXT:    li 4, 1
 ; CHECK-NEXT:    iseleq 3, 4, 3
 ; CHECK-NEXT:    blr
-  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3)  ; nan
+  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3) strictfp ; nan
   ret i1 %1
 }
 
@@ -86,7 +86,7 @@ define i1 @isnan_ppc_fp128_strictfp(ppc_fp128 %x) strictfp nounwind {
 ; CHECK-NEXT:    li 4, 1
 ; CHECK-NEXT:    iselgt 3, 4, 3
 ; CHECK-NEXT:    blr
-  %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 3)  ; nan
+  %1 = call i1 @llvm.is.fpclass.ppcf128(ppc_fp128 %x, i32 3) strictfp ; nan
   ret i1 %1
 }
 
@@ -98,7 +98,7 @@ define i1 @isnan_f128_strictfp(fp128 %x) strictfp nounwind {
 ; CHECK-NEXT:    li 4, 1
 ; CHECK-NEXT:    iseleq 3, 4, 3
 ; CHECK-NEXT:    blr
-  %1 = call i1 @llvm.is.fpclass.f128(fp128 %x, i32 3)  ; nan
+  %1 = call i1 @llvm.is.fpclass.f128(fp128 %x, i32 3) strictfp ; nan
   ret i1 %1
 }
 

diff --git a/llvm/test/CodeGen/PowerPC/nofpexcept.ll b/llvm/test/CodeGen/PowerPC/nofpexcept.ll
index d2b2fef6dea12d..8b998242b28d45 100644
--- a/llvm/test/CodeGen/PowerPC/nofpexcept.ll
+++ b/llvm/test/CodeGen/PowerPC/nofpexcept.ll
@@ -69,7 +69,7 @@ entry:
 }
 
 ; Verify nofpexcept is set to constrained conversions when ignoring exceptions
-define void @fptoint_nofpexcept(ppc_fp128 %p, fp128 %m, ptr %addr1, ptr %addr2) {
+define void @fptoint_nofpexcept(ppc_fp128 %p, fp128 %m, ptr %addr1, ptr %addr2) #0 {
   ; CHECK-LABEL: name: fptoint_nofpexcept
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
@@ -151,18 +151,18 @@ define void @fptoint_nofpexcept(ppc_fp128 %p, fp128 %m, ptr %addr1, ptr %addr2)
   ; CHECK-NEXT:   STW killed [[XOR]], 0, [[COPY1]] :: (volatile store (s32) into %ir.addr1)
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm
 entry:
-  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.ignore")
   store volatile i32 %conv1, ptr %addr1, align 4
-  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.ignore")
   store volatile i32 %conv2, ptr %addr1, align 4
-  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.ignore")
   store volatile i64 %conv3, ptr %addr2, align 8
-  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.ignore")
   store volatile i64 %conv4, ptr %addr2, align 8
 
-  %conv5 = tail call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore") #0
+  %conv5 = tail call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore")
   store volatile i32 %conv5, ptr %addr1, align 4
-  %conv6 = tail call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore") #0
+  %conv6 = tail call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128 %p, metadata !"fpexcept.ignore")
   store volatile i32 %conv6, ptr %addr1, align 4
   ret void
 }
@@ -182,7 +182,7 @@ define signext i32 @q_to_i32(fp128 %m) #0 {
   ; CHECK-NEXT:   $x3 = COPY [[EXTSW_32_64_]]
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
 entry:
-  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
@@ -197,7 +197,7 @@ define i64 @q_to_i64(fp128 %m) #0 {
   ; CHECK-NEXT:   $x3 = COPY [[MFVRD]]
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
 entry:
-  %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -212,7 +212,7 @@ define i64 @q_to_u64(fp128 %m) #0 {
   ; CHECK-NEXT:   $x3 = COPY [[MFVRD]]
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
 entry:
-  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.strict")
   ret i64 %conv
 }
 
@@ -232,7 +232,7 @@ define zeroext i32 @q_to_u32(fp128 %m) #0 {
   ; CHECK-NEXT:   $x3 = COPY [[RLDICL]]
   ; CHECK-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
 entry:
-  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.strict")
   ret i32 %conv
 }
 
