[llvm] [Test] Fix usage of constrained intrinsics (PR #113523)
Serge Pavlov via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 23 23:03:29 PDT 2024
https://github.com/spavloff updated https://github.com/llvm/llvm-project/pull/113523
>From 8532bc0d8e38c28c448ef6d5920f5ed43c483df7 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Thu, 24 Oct 2024 01:07:58 +0700
Subject: [PATCH 1/2] [Test] Fix usage of constrained intrinsics
Some tests contain errors in constrained intrinsic usage, such as missed
or extra type parameters, wrong type parameters order and some other.
---
llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll | 32 +++++------
.../CodeGen/AArch64/fp-intrinsics-fp16.ll | 16 +++---
.../CodeGen/AArch64/fp-intrinsics-vector.ll | 6 +-
llvm/test/CodeGen/AArch64/fp-intrinsics.ll | 48 ++++++++--------
llvm/test/CodeGen/ARM/fp-intrinsics.ll | 56 +++++++++----------
.../ppcf128-constrained-fp-intrinsics.ll | 28 +++++-----
.../CodeGen/RISCV/double-intrinsics-strict.ll | 4 +-
.../CodeGen/RISCV/float-intrinsics-strict.ll | 4 +-
.../RISCV/rvv/rvv-peephole-vmerge-vops.ll | 6 +-
llvm/test/CodeGen/X86/bfloat-constrained.ll | 18 +++---
.../CodeGen/X86/float-strict-powi-convert.ll | 4 +-
.../Transforms/EarlyCSE/defaultfp-strictfp.ll | 20 +++----
.../Transforms/EarlyCSE/ebstrict-strictfp.ll | 12 ++--
.../Transforms/EarlyCSE/mixed-strictfp.ll | 20 +++----
.../Transforms/EarlyCSE/nonmixed-strictfp.ll | 20 +++----
.../Transforms/SCCP/strictfp-phis-fcmp.ll | 26 ++++-----
.../Transforms/SCCP/strictfp-phis-fcmps.ll | 26 ++++-----
17 files changed, 173 insertions(+), 173 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
index 049098ab2ae97d..40684b0f3a256b 100644
--- a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
+++ b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
@@ -131,7 +131,7 @@ define double @t1_strict(double %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0
- %conv1 = call double @llvm.experimental.constrained.sitofp.i64.f64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret double %conv1
}
@@ -143,7 +143,7 @@ define float @t2_strict(float %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
- %conv1 = call float @llvm.experimental.constrained.sitofp.i32.f32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret float %conv1
}
@@ -155,7 +155,7 @@ define half @t3_strict(half %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0
- %conv1 = call half @llvm.experimental.constrained.sitofp.i32.f16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %conv1
}
@@ -167,7 +167,7 @@ define double @t4_strict(double %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0
- %conv1 = call double @llvm.experimental.constrained.uitofp.i64.f64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret double %conv1
}
@@ -179,7 +179,7 @@ define float @t5_strict(float %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
- %conv1 = call float @llvm.experimental.constrained.uitofp.i32.f32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret float %conv1
}
@@ -191,7 +191,7 @@ define half @t6_strict(half %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
- %conv1 = call half @llvm.experimental.constrained.uitofp.i32.f16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret half %conv1
}
@@ -216,7 +216,7 @@ define bfloat @t7_strict(bfloat %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
- %conv1 = call bfloat @llvm.experimental.constrained.sitofp.i32.bf16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret bfloat %conv1
}
@@ -241,7 +241,7 @@ define bfloat @t8_strict(bfloat %x) #0 {
; CHECK-NEXT: ret
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
- %conv1 = call bfloat @llvm.experimental.constrained.uitofp.i32.bf16(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %conv1 = call bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret bfloat %conv1
}
@@ -255,11 +255,11 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
-declare bfloat @llvm.experimental.constrained.sitofp.i32.bf16(i32, metadata, metadata)
-declare bfloat @llvm.experimental.constrained.uitofp.i32.bf16(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.sitofp.i32.f16(i32, metadata, metadata)
-declare half @llvm.experimental.constrained.uitofp.i32.f16(i32, metadata, metadata)
-declare float @llvm.experimental.constrained.sitofp.i32.f32(i32, metadata, metadata)
-declare float @llvm.experimental.constrained.uitofp.i32.f32(i32, metadata, metadata)
-declare double @llvm.experimental.constrained.sitofp.i64.f64(i64, metadata, metadata)
-declare double @llvm.experimental.constrained.uitofp.i64.f64(i64, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
+declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
index cbdfb4c9327756..db815a9050bd03 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
@@ -579,7 +579,7 @@ define i32 @lrint_f16(half %x) #0 {
; CHECK-FP16-NEXT: frintx h0, h0
; CHECK-FP16-NEXT: fcvtzs w0, h0
; CHECK-FP16-NEXT: ret
- %val = call i32 @llvm.experimental.constrained.lrint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -596,7 +596,7 @@ define i64 @llrint_f16(half %x) #0 {
; CHECK-FP16-NEXT: frintx h0, h0
; CHECK-FP16-NEXT: fcvtzs x0, h0
; CHECK-FP16-NEXT: ret
- %val = call i64 @llvm.experimental.constrained.llrint.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -677,7 +677,7 @@ define i32 @lround_f16(half %x) #0 {
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtas w0, h0
; CHECK-FP16-NEXT: ret
- %val = call i32 @llvm.experimental.constrained.lround.f16(half %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -692,7 +692,7 @@ define i64 @llround_f16(half %x) #0 {
; CHECK-FP16: // %bb.0:
; CHECK-FP16-NEXT: fcvtas x0, h0
; CHECK-FP16-NEXT: ret
- %val = call i64 @llvm.experimental.constrained.llround.f16(half %x, metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1261,14 +1261,14 @@ declare half @llvm.experimental.constrained.exp.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.exp2.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f16(half, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f16(half, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f16(half, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata)
declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata)
declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
declare half @llvm.experimental.constrained.floor.f16(half, metadata)
-declare i32 @llvm.experimental.constrained.lround.f16(half, metadata)
-declare i64 @llvm.experimental.constrained.llround.f16(half, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
declare half @llvm.experimental.constrained.round.f16(half, metadata)
declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 1a9ba9fd4a5180..6147afba4e603a 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -279,7 +279,7 @@ define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-NEXT: xtn v0.4h, v4.4s
; CHECK-NEXT: ret
entry:
- %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
+ %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
ret <4 x i1> %val
}
@@ -825,8 +825,8 @@ declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, meta
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f64(<4 x float>, <4 x float>, metadata, metadata)
-declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f64(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index 62b4a79b26d8e7..a43e18b9d72e13 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -255,7 +255,7 @@ define float @nearbyint_f32(float %x) #0 {
; CHECK: frintx [[REG:s[0-9]+]], s0
; CHECK: fcvtzs w0, [[REG]]
define i32 @lrint_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -263,7 +263,7 @@ define i32 @lrint_f32(float %x) #0 {
; CHECK: frintx [[REG:s[0-9]+]], s0
; CHECK: fcvtzs x0, [[REG]]
define i64 @llrint_f32(float %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -312,14 +312,14 @@ define float @floor_f32(float %x) #0 {
; CHECK-LABEL: lround_f32:
; CHECK: fcvtas w0, s0
define i32 @lround_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lround.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llround_f32:
; CHECK: fcvtas x0, s0
define i64 @llround_f32(float %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llround.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -788,7 +788,7 @@ define double @nearbyint_f64(double %x) #0 {
; CHECK: frintx [[REG:d[0-9]+]], d0
; CHECK: fcvtzs w0, [[REG]]
define i32 @lrint_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -796,7 +796,7 @@ define i32 @lrint_f64(double %x) #0 {
; CHECK: frintx [[REG:d[0-9]+]], d0
; CHECK: fcvtzs x0, [[REG]]
define i64 @llrint_f64(double %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -845,14 +845,14 @@ define double @floor_f64(double %x) #0 {
; CHECK-LABEL: lround_f64:
; CHECK: fcvtas w0, d0
define i32 @lround_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lround.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llround_f64:
; CHECK: fcvtas x0, d0
define i64 @llround_f64(double %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llround.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1320,14 +1320,14 @@ define fp128 @nearbyint_f128(fp128 %x) #0 {
; CHECK-LABEL: lrint_f128:
; CHECK: bl lrintl
define i32 @lrint_f128(fp128 %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lrint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llrint_f128:
; CHECK: bl llrintl
define i64 @llrint_f128(fp128 %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llrint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1362,14 +1362,14 @@ define fp128 @floor_f128(fp128 %x) #0 {
; CHECK-LABEL: lround_f128:
; CHECK: bl lroundl
define i32 @lround_f128(fp128 %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lround.f128(fp128 %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llround_f128:
; CHECK: bl llroundl
define i64 @llround_f128(fp128 %x) #0 {
- %val = call i64 @llvm.experimental.constrained.llround.f128(fp128 %x, metadata !"fpexcept.strict") #0
+ %val = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i64 %val
}
@@ -1766,16 +1766,16 @@ declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f32(float, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
declare float @llvm.experimental.constrained.floor.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.lround.f32(float, metadata)
-declare i64 @llvm.experimental.constrained.llround.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
declare float @llvm.experimental.constrained.round.f32(float, metadata)
declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
@@ -1817,16 +1817,16 @@ declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata
declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f64(double, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
declare double @llvm.experimental.constrained.floor.f64(double, metadata)
-declare i32 @llvm.experimental.constrained.lround.f64(double, metadata)
-declare i64 @llvm.experimental.constrained.llround.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
declare double @llvm.experimental.constrained.round.f64(double, metadata)
declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
@@ -1868,14 +1868,14 @@ declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f128(fp128, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.f128(fp128, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)
declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
-declare i32 @llvm.experimental.constrained.lround.f128(fp128, metadata)
-declare i64 @llvm.experimental.constrained.llround.f128(fp128, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata)
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
index e286eb3226e46f..ca2dc701bd1fb3 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
@@ -68,7 +68,7 @@ define float @fma_f32(float %x, float %y, float %z) #0 {
; CHECK-NOSP: bl __aeabi_f2iz
; CHECK-SP: vcvt.s32.f32
define i32 @fptosi_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.fptosi.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -79,9 +79,9 @@ define i32 @fptosi_f32(float %x) #0 {
; FIXME-CHECK-SP: vcvt.s32.f32
define void @fptosi_f32_twice(float %arg, ptr %ptr) #0 {
entry:
- %conv = call i32 @llvm.experimental.constrained.fptosi.f32(float %arg, metadata !"fpexcept.strict") #0
+ %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %arg, metadata !"fpexcept.strict") #0
store i32 %conv, ptr %ptr, align 4
- %conv1 = call i32 @llvm.experimental.constrained.fptosi.f32(float %arg, metadata !"fpexcept.strict") #0
+ %conv1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %arg, metadata !"fpexcept.strict") #0
%idx = getelementptr inbounds i32, ptr %ptr, i32 1
store i32 %conv1, ptr %idx, align 4
ret void
@@ -91,7 +91,7 @@ entry:
; CHECK-NOSP: bl __aeabi_f2uiz
; FIXME-CHECK-SP: vcvt.u32.f32
define i32 @fptoui_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.fptoui.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -102,9 +102,9 @@ define i32 @fptoui_f32(float %x) #0 {
; FIXME-CHECK-SP: vcvt.u32.f32
define void @fptoui_f32_twice(float %arg, ptr %ptr) #0 {
entry:
- %conv = call i32 @llvm.experimental.constrained.fptoui.f32(float %arg, metadata !"fpexcept.strict") #0
+ %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %arg, metadata !"fpexcept.strict") #0
store i32 %conv, ptr %ptr, align 4
- %conv1 = call i32 @llvm.experimental.constrained.fptoui.f32(float %arg, metadata !"fpexcept.strict") #0
+ %conv1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %arg, metadata !"fpexcept.strict") #0
%idx = getelementptr inbounds i32, ptr %ptr, i32 1
store i32 %conv1, ptr %idx, align 4
ret void
@@ -209,14 +209,14 @@ define float @nearbyint_f32(float %x) #0 {
; CHECK-LABEL: lrint_f32:
; CHECK: bl lrintf
define i32 @lrint_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llrint_f32:
; CHECK: bl llrintf
define i32 @llrint_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.llrint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.llrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -259,14 +259,14 @@ define float @floor_f32(float %x) #0 {
; CHECK-LABEL: lround_f32:
; CHECK: bl lroundf
define i32 @lround_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lround.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llround_f32:
; CHECK: bl llroundf
define i32 @llround_f32(float %x) #0 {
- %val = call i32 @llvm.experimental.constrained.llround.f32(float %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.llround.i32.f32(float %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -562,7 +562,7 @@ define double @fma_f64(double %x, double %y, double %z) #0 {
; CHECK-NODP: bl __aeabi_d2iz
; CHECK-DP: vcvt.s32.f64
define i32 @fptosi_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.fptosi.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -570,7 +570,7 @@ define i32 @fptosi_f64(double %x) #0 {
; CHECK-NODP: bl __aeabi_d2uiz
; FIXME-CHECK-DP: vcvt.u32.f64
define i32 @fptoui_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.fptoui.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -673,14 +673,14 @@ define double @nearbyint_f64(double %x) #0 {
; CHECK-LABEL: lrint_f64:
; CHECK: bl lrint
define i32 @lrint_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llrint_f64:
; CHECK: bl llrint
define i32 @llrint_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.llrint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.llrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -723,14 +723,14 @@ define double @floor_f64(double %x) #0 {
; CHECK-LABEL: lround_f64:
; CHECK: bl lround
define i32 @lround_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.lround.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
; CHECK-LABEL: llround_f64:
; CHECK: bl llround
define i32 @llround_f64(double %x) #0 {
- %val = call i32 @llvm.experimental.constrained.llround.f64(double %x, metadata !"fpexcept.strict") #0
+ %val = call i32 @llvm.experimental.constrained.llround.i32.f64(double %x, metadata !"fpexcept.strict") #0
ret i32 %val
}
@@ -1031,8 +1031,8 @@ declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, me
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
-declare i32 @llvm.experimental.constrained.fptosi.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.fptoui.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
@@ -1046,14 +1046,14 @@ declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f32(float, metadata, metadata)
-declare i32 @llvm.experimental.constrained.llrint.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
+declare i32 @llvm.experimental.constrained.llrint.i32.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
declare float @llvm.experimental.constrained.floor.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.lround.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.llround.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.llround.i32.f32(float, metadata)
declare float @llvm.experimental.constrained.round.f32(float, metadata)
declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
@@ -1065,8 +1065,8 @@ declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata,
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
-declare i32 @llvm.experimental.constrained.fptosi.f64(double, metadata)
-declare i32 @llvm.experimental.constrained.fptoui.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
@@ -1080,14 +1080,14 @@ declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata
declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-declare i32 @llvm.experimental.constrained.lrint.f64(double, metadata, metadata)
-declare i32 @llvm.experimental.constrained.llrint.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
+declare i32 @llvm.experimental.constrained.llrint.i32.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
declare double @llvm.experimental.constrained.floor.f64(double, metadata)
-declare i32 @llvm.experimental.constrained.lround.f64(double, metadata)
-declare i32 @llvm.experimental.constrained.llround.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.llround.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.round.f64(double, metadata)
declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index 76f3dea5b7751d..3e8935e7d5977b 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1083,7 +1083,7 @@ define float @test_fptrunc_ppc_fp128_f32(ppc_fp128 %first) #0 {
; PC64-NEXT: frsp 1, 1
; PC64-NEXT: blr
entry:
- %fptrunc = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32(
+ %fptrunc = call float @llvm.experimental.constrained.fptrunc.f32.ppcf128.f32(
ppc_fp128 %first,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
@@ -1103,7 +1103,7 @@ define double @test_fptrunc_ppc_fp128_f64(ppc_fp128 %first) #0 {
; PC64: # %bb.0: # %entry
; PC64-NEXT: blr
entry:
- %fptrunc = call double @llvm.experimental.constrained.fptrunc.ppcf128.f64(
+ %fptrunc = call double @llvm.experimental.constrained.fptrunc.f64.ppcf128(
ppc_fp128 %first,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
@@ -1127,7 +1127,7 @@ define ppc_fp128 @test_fpext_ppc_fp128_f32(float %first) #0 {
; PC64-NEXT: lfs 2, .LCPI26_0@toc@l(3)
; PC64-NEXT: blr
entry:
- %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
+ %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.ppcf128.f32(
float %first,
metadata !"fpexcept.strict") #1
ret ppc_fp128 %fpext
@@ -1150,7 +1150,7 @@ define ppc_fp128 @test_fpext_ppc_fp128_f64(double %first) #0 {
; PC64-NEXT: lfs 2, .LCPI27_0@toc@l(3)
; PC64-NEXT: blr
entry:
- %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f64.ppcf128(
+ %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.ppcf128.f64(
double %first,
metadata !"fpexcept.strict") #1
ret ppc_fp128 %fpext
@@ -1568,7 +1568,7 @@ define void @test_constrained_libcall_multichain(ptr %firstptr, ptr %result) #0
; PC64-NEXT: mtlr 0
; PC64-NEXT: blr
%load = load float, ptr %firstptr
- %first = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(
+ %first = call ppc_fp128 @llvm.experimental.constrained.fpext.ppcf128.f32(
float %load,
metadata !"fpexcept.strict") #1
store ppc_fp128 %first, ptr %result
@@ -1598,7 +1598,7 @@ define void @test_constrained_libcall_multichain(ptr %firstptr, ptr %result) #0
i32 2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
- %tinypow = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32(
+ %tinypow = call float @llvm.experimental.constrained.fptrunc.f32.ppcf128(
ppc_fp128 %powi,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #1
@@ -2015,7 +2015,7 @@ define i1 @ppcq_to_s1(ppc_fp128 %a) {
; PC64-NEXT: mtlr 0
; PC64-NEXT: blr
entry:
- %conv = tail call i1 @llvm.experimental.constrained.fptosi.ppcf128.i1(ppc_fp128 %a, metadata !"fpexcept.strict") #1
+ %conv = tail call i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128 %a, metadata !"fpexcept.strict") #1
ret i1 %conv
}
@@ -2062,7 +2062,7 @@ define i1 @ppcq_to_u1(ppc_fp128 %a) {
; PC64-NEXT: mtlr 0
; PC64-NEXT: blr
entry:
- %conv = tail call i1 @llvm.experimental.constrained.fptoui.ppcf128.i1(ppc_fp128 %a, metadata !"fpexcept.strict") #1
+ %conv = tail call i1 @llvm.experimental.constrained.fptoui.i1.ppcf128(ppc_fp128 %a, metadata !"fpexcept.strict") #1
ret i1 %conv
}
@@ -2121,10 +2121,10 @@ declare ppc_fp128 @llvm.experimental.constrained.exp.ppcf128(ppc_fp128, metadata
declare ppc_fp128 @llvm.experimental.constrained.exp2.ppcf128(ppc_fp128, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.floor.ppcf128(ppc_fp128, metadata)
declare ppc_fp128 @llvm.experimental.constrained.fma.ppcf128(ppc_fp128, ppc_fp128, ppc_fp128, metadata, metadata)
-declare ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(float, metadata)
-declare ppc_fp128 @llvm.experimental.constrained.fpext.f64.ppcf128(double, metadata)
-declare float @llvm.experimental.constrained.fptrunc.ppcf128.f32(ppc_fp128, metadata, metadata)
-declare double @llvm.experimental.constrained.fptrunc.ppcf128.f64(ppc_fp128, metadata, metadata)
+declare ppc_fp128 @llvm.experimental.constrained.fpext.ppcf128.f32(float, metadata)
+declare ppc_fp128 @llvm.experimental.constrained.fpext.ppcf128.f64(double, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.ppcf128(ppc_fp128, metadata, metadata)
+declare double @llvm.experimental.constrained.fptrunc.f64.ppcf128(ppc_fp128, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.log.ppcf128(ppc_fp128, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.log10.ppcf128(ppc_fp128, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.log2.ppcf128(ppc_fp128, metadata, metadata)
@@ -2144,10 +2144,10 @@ declare ppc_fp128 @llvm.experimental.constrained.tan.ppcf128(ppc_fp128, metadata
declare ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(ppc_fp128, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata)
-declare i1 @llvm.experimental.constrained.fptosi.ppcf128.i1(ppc_fp128, metadata)
+declare i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128, metadata)
-declare i1 @llvm.experimental.constrained.fptoui.ppcf128.i1(ppc_fp128, metadata)
+declare i1 @llvm.experimental.constrained.fptoui.i1.ppcf128(ppc_fp128, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i64(i64, metadata, metadata)
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index 4cb6191e7322e9..7e5ea173e52295 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -57,7 +57,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
ret double %1
}
-declare double @llvm.experimental.constrained.powi.f64.i32(double, i32, metadata, metadata)
+declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV32IFD-LABEL: powi_f64:
@@ -116,7 +116,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
- %1 = call double @llvm.experimental.constrained.powi.f64.i32(double %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ %1 = call double @llvm.experimental.constrained.powi.f64(double %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret double %1
}
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index cbd84634de11c0..7b2d38fefaacb1 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -52,7 +52,7 @@ define float @sqrt_f32(float %a) nounwind strictfp {
ret float %1
}
-declare float @llvm.experimental.constrained.powi.f32.i32(float, i32, metadata, metadata)
+declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV32IF-LABEL: powi_f32:
@@ -111,7 +111,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
- %1 = call float @llvm.experimental.constrained.powi.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ %1 = call float @llvm.experimental.constrained.powi.f32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 6700920cebff0a..23ebfade6f6b0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -357,11 +357,11 @@ define <vscale x 2 x float> @vpmerge_constrained_fadd(<vscale x 2 x float> %pass
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
- %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 %vl) strictfp
ret <vscale x 2 x float> %b
}
-declare <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
+declare <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64)
; This shouldn't be folded because we need to preserve exceptions with
@@ -374,7 +374,7 @@ define <vscale x 2 x float> @vpmerge_constrained_fadd_vlmax(<vscale x 2 x float>
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
- %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ %a = call <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i64 -1) strictfp
ret <vscale x 2 x float> %b
}
diff --git a/llvm/test/CodeGen/X86/bfloat-constrained.ll b/llvm/test/CodeGen/X86/bfloat-constrained.ll
index 0a8c4f20648b05..081b1cebfc43d6 100644
--- a/llvm/test/CodeGen/X86/bfloat-constrained.ll
+++ b/llvm/test/CodeGen/X86/bfloat-constrained.ll
@@ -86,7 +86,7 @@ define void @float_to_bfloat(float %0) strictfp {
; X64-NEXT: popq %rax
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
- %2 = tail call bfloat @llvm.experimental.constrained.fptrunc.bfloat.f32(float %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %2 = tail call bfloat @llvm.experimental.constrained.fptrunc.bf16.f32(float %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store bfloat %2, ptr @a, align 2
ret void
}
@@ -115,7 +115,7 @@ define void @double_to_bfloat(double %0) strictfp {
; X64-NEXT: popq %rax
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
- %2 = tail call bfloat @llvm.experimental.constrained.fptrunc.bfloat.f64(double %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %2 = tail call bfloat @llvm.experimental.constrained.fptrunc.bf16.f64(double %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store bfloat %2, ptr @a, align 2
ret void
}
@@ -162,20 +162,20 @@ define void @add() strictfp {
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
%1 = load bfloat, ptr @a, align 2
- %2 = tail call float @llvm.experimental.constrained.fpext.f32.bfloat(bfloat %1, metadata !"fpexcept.strict") #0
+ %2 = tail call float @llvm.experimental.constrained.fpext.f32.bf16(bfloat %1, metadata !"fpexcept.strict") #0
%3 = load bfloat, ptr @b, align 2
- %4 = tail call float @llvm.experimental.constrained.fpext.f32.bfloat(bfloat %3, metadata !"fpexcept.strict") #0
+ %4 = tail call float @llvm.experimental.constrained.fpext.f32.bf16(bfloat %3, metadata !"fpexcept.strict") #0
%5 = tail call float @llvm.experimental.constrained.fadd.f32(float %2, float %4, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
- %6 = tail call bfloat @llvm.experimental.constrained.fptrunc.bfloat.f32(float %5, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ %6 = tail call bfloat @llvm.experimental.constrained.fptrunc.bf16.f32(float %5, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store bfloat %6, ptr @c, align 2
ret void
}
-declare float @llvm.experimental.constrained.fpext.f32.bfloat(bfloat, metadata)
-declare double @llvm.experimental.constrained.fpext.f64.bfloat(bfloat, metadata)
+declare float @llvm.experimental.constrained.fpext.f32.bf16(bfloat, metadata)
+declare double @llvm.experimental.constrained.fpext.f64.bf16(bfloat, metadata)
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
-declare bfloat @llvm.experimental.constrained.fptrunc.bfloat.f32(float, metadata, metadata)
-declare bfloat @llvm.experimental.constrained.fptrunc.bfloat.f64(double, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.fptrunc.bf16.f32(float, metadata, metadata)
+declare bfloat @llvm.experimental.constrained.fptrunc.bf16.f64(double, metadata, metadata)
attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/X86/float-strict-powi-convert.ll b/llvm/test/CodeGen/X86/float-strict-powi-convert.ll
index 4d0cffc53d93af..b39f5ec667cecd 100644
--- a/llvm/test/CodeGen/X86/float-strict-powi-convert.ll
+++ b/llvm/test/CodeGen/X86/float-strict-powi-convert.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=x86_64-pc-windows-msvc %s -o - | FileCheck %s -check-prefix=WIN
; RUN: llc -mtriple=x86_64-pc-linux %s -o -| FileCheck %s -check-prefix=UNIX
-declare float @llvm.experimental.constrained.powi.f32.i32(float, i32, metadata, metadata)
+declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
define float @powi_f64(float %a, i32 %b) nounwind strictfp {
; WIN-LABEL: powi_f64:
@@ -19,6 +19,6 @@ define float @powi_f64(float %a, i32 %b) nounwind strictfp {
; UNIX-NEXT: callq __powisf2@PLT
; UNIX-NEXT: popq %rax
; UNIX-NEXT: retq
- %1 = call float @llvm.experimental.constrained.powi.f32.i32(float %a, i32 %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") strictfp
+ %1 = call float @llvm.experimental.constrained.powi.f32(float %a, i32 %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") strictfp
ret float %1
}
diff --git a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
index bdfad0d6e44e9c..3871822c9dc17a 100644
--- a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
@@ -246,8 +246,8 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -262,9 +262,9 @@ define i1 @multiple_fcmp_split(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
call void @arbitraryfunc() #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -278,8 +278,8 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -294,9 +294,9 @@ define i1 @multiple_fcmps_split(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
call void @arbitraryfunc() #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -318,5 +318,5 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmp.i1.f64(double, double, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmps.i1.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
index fafc7ccbb38c1f..f2675ce7816a4e 100644
--- a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
@@ -132,8 +132,8 @@ define i1 @fcmp_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -149,8 +149,8 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -172,5 +172,5 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmp.i1.f64(double, double, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmps.i1.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
index f3b857ab2f4874..b79f7018b8d0d5 100644
--- a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
@@ -339,8 +339,8 @@ define i1 @mixed_fcmp_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -356,8 +356,8 @@ define i1 @mixed_fcmp_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -373,8 +373,8 @@ define i1 @mixed_fcmps_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -390,8 +390,8 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -413,5 +413,5 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmp.i1.f64(double, double, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmps.i1.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
index 8772f208ebe47e..3acf5597dfc3fe 100644
--- a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
@@ -313,8 +313,8 @@ define i1 @fcmp_defaultenv(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -328,8 +328,8 @@ define i1 @fcmp_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
- %2 = call i1 @llvm.experimental.constrained.fcmp.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -343,8 +343,8 @@ define i1 @fcmps_defaultenv(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -358,8 +358,8 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
- %1 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
- %2 = call i1 @llvm.experimental.constrained.fcmps.i1.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
+ %2 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
%3 = zext i1 %1 to i32
%4 = zext i1 %2 to i32
%5 = call i32 @bar.i32(i32 %3, i32 %4) #0
@@ -381,5 +381,5 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmp.i1.f64(double, double, metadata, metadata)
-declare i1 @llvm.experimental.constrained.fcmps.i1.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/SCCP/strictfp-phis-fcmp.ll b/llvm/test/Transforms/SCCP/strictfp-phis-fcmp.ll
index 3bf7d9578b560a..a6c023a25608b4 100644
--- a/llvm/test/Transforms/SCCP/strictfp-phis-fcmp.ll
+++ b/llvm/test/Transforms/SCCP/strictfp-phis-fcmp.ll
@@ -19,7 +19,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -42,7 +42,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -66,7 +66,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -91,7 +91,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -115,7 +115,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -139,7 +139,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -163,7 +163,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -187,7 +187,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -211,7 +211,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -236,7 +236,7 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -261,7 +261,7 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -288,11 +288,11 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmp.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmp.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.strict") #0
ret i1 %c
}
attributes #0 = { strictfp }
-declare i1 @llvm.experimental.constrained.fcmp.i1.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
diff --git a/llvm/test/Transforms/SCCP/strictfp-phis-fcmps.ll b/llvm/test/Transforms/SCCP/strictfp-phis-fcmps.ll
index 6db1f47ccca997..213293a7859385 100644
--- a/llvm/test/Transforms/SCCP/strictfp-phis-fcmps.ll
+++ b/llvm/test/Transforms/SCCP/strictfp-phis-fcmps.ll
@@ -19,7 +19,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -42,7 +42,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -66,7 +66,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -91,7 +91,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -115,7 +115,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -139,7 +139,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ 2.0, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -163,7 +163,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -187,7 +187,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -211,7 +211,7 @@ if.true:
end:
%p = phi float [ 1.0, %entry ], [ %f, %if.true]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"ueq", metadata !"fpexcept.strict") #0
ret i1 %c
}
@@ -236,7 +236,7 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.ignore") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.ignore") #0
ret i1 %c
}
@@ -261,7 +261,7 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.maytrap") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.maytrap") #0
ret i1 %c
}
@@ -288,11 +288,11 @@ dead:
end:
%p = phi float [ 1.0, %entry ], [ 1.0, %if.true], [ %f, %dead ]
- %c = call i1 @llvm.experimental.constrained.fcmps.i1.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.strict") #0
+ %c = call i1 @llvm.experimental.constrained.fcmps.f32(float %p, float 1.0, metadata !"une", metadata !"fpexcept.strict") #0
ret i1 %c
}
attributes #0 = { strictfp }
-declare i1 @llvm.experimental.constrained.fcmps.i1.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
>From edeae6c7efe36508c93b619ffb9849c4913d629b Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Thu, 24 Oct 2024 13:03:20 +0700
Subject: [PATCH 2/2] Apply suggestions from code review
Co-authored-by: Andy Kaylor <andy_kaylor at yahoo.com>
---
llvm/test/CodeGen/AArch64/fp-intrinsics.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index a43e18b9d72e13..daeec83878645f 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -1774,7 +1774,7 @@ declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata)
declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
declare float @llvm.experimental.constrained.floor.f32(float, metadata)
-declare i32 @llvm.experimental.constrained.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
declare float @llvm.experimental.constrained.round.f32(float, metadata)
declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
@@ -1874,7 +1874,7 @@ declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)
declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
-declare i32 @llvm.experimental.constrained.i32.f128(fp128, metadata)
+declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
More information about the llvm-commits
mailing list