[llvm] [Test] Fix usage of constrained intrinsics (PR #113523)

via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 23 21:50:20 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

Author: Serge Pavlov (spavloff)

Changes:

Some tests use constrained intrinsics incorrectly, for example with missing or extra type parameters, type parameters in the wrong order, and similar mistakes.
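For reference, the mangled suffix of an overloaded constrained intrinsic lists the result type before the operand type, so a signed i64-to-double conversion is `...sitofp.f64.i64`, not `...sitofp.i64.f64`. A minimal sketch of the corrected pattern (the function name `@example` is hypothetical, not from the patch):

```llvm
; Wrong (operand type first):  @llvm.experimental.constrained.sitofp.i64.f64
; Right (result type first):   @llvm.experimental.constrained.sitofp.f64.i64
define double @example(i64 %x) strictfp {
  %conv = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
  ret double %conv
}

declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
```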

---

Patch is 71.26 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/113523.diff


17 Files Affected:

- (modified) llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll (+16-16) 
- (modified) llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll (+8-8) 
- (modified) llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll (+3-3) 
- (modified) llvm/test/CodeGen/AArch64/fp-intrinsics.ll (+24-24) 
- (modified) llvm/test/CodeGen/ARM/fp-intrinsics.ll (+28-28) 
- (modified) llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll (+14-14) 
- (modified) llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/bfloat-constrained.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/float-strict-powi-convert.ll (+2-2) 
- (modified) llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll (+10-10) 
- (modified) llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll (+6-6) 
- (modified) llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll (+10-10) 
- (modified) llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll (+10-10) 
- (modified) llvm/test/Transforms/SCCP/strictfp-phis-fcmp.ll (+13-13) 
- (modified) llvm/test/Transforms/SCCP/strictfp-phis-fcmps.ll (+13-13) 


``````````diff
diff --git a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
index 049098ab2ae97d..40684b0f3a256b 100644
--- a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
+++ b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
@@ -131,7 +131,7 @@ define double @t1_strict(double %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0
-  %conv1 = call double @llvm.experimental.constrained.sitofp.i64.f64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %conv1
 }
 
@@ -143,7 +143,7 @@ define float @t2_strict(float %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
-  %conv1 = call float @llvm.experimental.constrained.sitofp.i32.f32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %conv1
 }
 
@@ -155,7 +155,7 @@ define half @t3_strict(half %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0
-  %conv1 = call half @llvm.experimental.constrained.sitofp.i32.f16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret half %conv1
 }
 
@@ -167,7 +167,7 @@ define double @t4_strict(double %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0
-  %conv1 = call double @llvm.experimental.constrained.uitofp.i64.f64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret double %conv1
 }
 
@@ -179,7 +179,7 @@ define float @t5_strict(float %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
-  %conv1 = call float @llvm.experimental.constrained.uitofp.i32.f32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret float %conv1
 }
 
@@ -191,7 +191,7 @@ define half @t6_strict(half %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
-  %conv1 = call half @llvm.experimental.constrained.uitofp.i32.f16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret half %conv1
 }
 
@@ -216,7 +216,7 @@ define bfloat @t7_strict(bfloat %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptosi.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
-  %conv1 = call bfloat @llvm.experimental.constrained.sitofp.i32.bf16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret bfloat %conv1
 }
 
@@ -241,7 +241,7 @@ define bfloat @t8_strict(bfloat %x) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %conv = call i32 @llvm.experimental.constrained.fptoui.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
-  %conv1 = call bfloat @llvm.experimental.constrained.uitofp.i32.bf16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %conv1 = call bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret bfloat %conv1
 }
 
@@ -255,11 +255,11 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
 declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
 declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
 declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
-declare bfloat @llvm.experimental.constrained.sitofp.i32.bf16(i32, metadata, metadata)
-declare bfloat @llvm.experimental.constrained.uitofp.i32.bf16(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.sitofp.i32.f16(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.uitofp.i32.f16(i32, metadata, metadata)
-declare float @llvm.experimental.constrained.sitofp.i32.f32(i32, metadata, metadata)
-declare float @llvm.experimental.constrained.uitofp.i32.f32(i32, metadata, metadata)
-declare double @llvm.experimental.constrained.sitofp.i64.f64(i64, metadata, metadata)
-declare double @llvm.experimental.constrained.uitofp.i64.f64(i64, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
index cbdfb4c9327756..db815a9050bd03 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
@@ -579,7 +579,7 @@ define i32 @lrint_f16(half %x) #0 {
 ; CHECK-FP16-NEXT:    frintx h0, h0
 ; CHECK-FP16-NEXT:    fcvtzs w0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call i32 @llvm.experimental.constrained.lrint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
@@ -596,7 +596,7 @@ define i64 @llrint_f16(half %x) #0 {
 ; CHECK-FP16-NEXT:    frintx h0, h0
 ; CHECK-FP16-NEXT:    fcvtzs x0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call i64 @llvm.experimental.constrained.llrint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -677,7 +677,7 @@ define i32 @lround_f16(half %x) #0 {
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcvtas w0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call i32 @llvm.experimental.constrained.lround.f16(half %x, metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
@@ -692,7 +692,7 @@ define i64 @llround_f16(half %x) #0 {
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    fcvtas x0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call i64 @llvm.experimental.constrained.llround.f16(half %x, metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -1261,14 +1261,14 @@ declare half @llvm.experimental.constrained.exp.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.exp2.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f16(half, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f16(half, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f16(half, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata)
 declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata)
 declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
 declare half @llvm.experimental.constrained.floor.f16(half, metadata)
-declare i32 @llvm.experimental.constrained.lround.f16(half, metadata)
-declare i64 @llvm.experimental.constrained.llround.f16(half, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
 declare half @llvm.experimental.constrained.round.f16(half, metadata)
 declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
 declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 1a9ba9fd4a5180..6147afba4e603a 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -279,7 +279,7 @@ define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
 ; CHECK-NEXT:    xtn v0.4h, v4.4s
 ; CHECK-NEXT:    ret
 entry:
-  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
   ret <4 x i1> %val
 }
 
@@ -825,8 +825,8 @@ declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, meta
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f64(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
 
 declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index 62b4a79b26d8e7..a43e18b9d72e13 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -255,7 +255,7 @@ define float @nearbyint_f32(float %x) #0 {
 ; CHECK: frintx [[REG:s[0-9]+]], s0
 ; CHECK: fcvtzs w0, [[REG]]
 define i32 @lrint_f32(float %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
@@ -263,7 +263,7 @@ define i32 @lrint_f32(float %x) #0 {
 ; CHECK: frintx [[REG:s[0-9]+]], s0
 ; CHECK: fcvtzs x0, [[REG]]
 define i64 @llrint_f32(float %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -312,14 +312,14 @@ define float @floor_f32(float %x) #0 {
 ; CHECK-LABEL: lround_f32:
 ; CHECK: fcvtas w0, s0
 define i32 @lround_f32(float %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lround.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
 ; CHECK-LABEL: llround_f32:
 ; CHECK: fcvtas x0, s0
 define i64 @llround_f32(float %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llround.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -788,7 +788,7 @@ define double @nearbyint_f64(double %x) #0 {
 ; CHECK: frintx [[REG:d[0-9]+]], d0
 ; CHECK: fcvtzs w0, [[REG]]
 define i32 @lrint_f64(double %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
@@ -796,7 +796,7 @@ define i32 @lrint_f64(double %x) #0 {
 ; CHECK: frintx [[REG:d[0-9]+]], d0
 ; CHECK: fcvtzs x0, [[REG]]
 define i64 @llrint_f64(double %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -845,14 +845,14 @@ define double @floor_f64(double %x) #0 {
 ; CHECK-LABEL: lround_f64:
 ; CHECK: fcvtas w0, d0
 define i32 @lround_f64(double %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lround.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
 ; CHECK-LABEL: llround_f64:
 ; CHECK: fcvtas x0, d0
 define i64 @llround_f64(double %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llround.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -1320,14 +1320,14 @@ define fp128 @nearbyint_f128(fp128 %x) #0 {
 ; CHECK-LABEL: lrint_f128:
 ; CHECK: bl lrintl
 define i32 @lrint_f128(fp128 %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lrint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
 ; CHECK-LABEL: llrint_f128:
 ; CHECK: bl llrintl
 define i64 @llrint_f128(fp128 %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llrint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -1362,14 +1362,14 @@ define fp128 @floor_f128(fp128 %x) #0 {
 ; CHECK-LABEL: lround_f128:
 ; CHECK: bl lroundl
 define i32 @lround_f128(fp128 %x) #0 {
-  %val = call i32 @llvm.experimental.constrained.lround.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i32 %val
 }
 
 ; CHECK-LABEL: llround_f128:
 ; CHECK: bl llroundl
 define i64 @llround_f128(fp128 %x) #0 {
-  %val = call i64 @llvm.experimental.constrained.llround.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
   ret i64 %val
 }
 
@@ -1766,16 +1766,16 @@ declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f32(float, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
 declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
 declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata)
 declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata)
 declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 declare float @llvm.experimental.constrained.floor.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.lround.f32(float, metadata)
-declare i64 @llvm.experimental.constrained.llround.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
 declare float @llvm.experimental.constrained.round.f32(float, metadata)
 declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
 declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
@@ -1817,16 +1817,16 @@ declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata
 declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f64(double, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
 declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
 declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
 declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
-declare i32 @llvm.experimental.constrained.lround.f64(double, metadata)
-declare i64 @llvm.experimental.constrained.llround.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
 declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
@@ -1868,14 +1868,14 @@ declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f128(fp128, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f128(fp128, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)
 declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)
 declare fp128 @llvm.experimental.const...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/113523

