[clang] [llvm] Reimplement constrained 'trunc' using operand bundles (PR #118253)

Serge Pavlov via cfe-commits cfe-commits at lists.llvm.org
Sun Dec 1 22:00:14 PST 2024


https://github.com/spavloff updated https://github.com/llvm/llvm-project/pull/118253

From a4b8a547130b843c60c7af73160b4e949f4861e8 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Thu, 22 Aug 2024 17:33:20 +0700
Subject: [PATCH 1/6] Implement operand bundles for floating-point operations

Currently, floating-point operations in their general form (beyond the
default FP environment) are always represented by calls to constrained
intrinsics. In addition to their side effect, these calls carry extra
information in the form of metadata arguments. As noted in
https://discourse.llvm.org/t/thought-on-strictfp-support/71453, this
scheme does not scale well, because it requires defining a separate
constrained intrinsic for each operation that may be used in a
non-default FP environment. The solution proposed in that discussion
was "to move the complexity about the environment tracking from the
intrinsics themselves to the call instruction".

This change implements that proposal using operand bundles
(https://llvm.org/docs/LangRef.html#operand-bundles). The same approach
was tried previously (https://reviews.llvm.org/D93455) but was never
finished.

This change does not add any new functionality; it only introduces a
new way of keeping FP-related information in LLVM IR. The metadata
arguments of constrained intrinsics are preserved, but queries such as
`getRoundingMode` or `getExceptionBehavior` no longer use them.
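
As an illustration, after this change a constrained call carries both
the metadata arguments and the new bundles (a sketch mirroring the
updated tests; "fpe.round"(i32 1) encodes round.tonearest and
"fpe.except"(i32 2) encodes fpexcept.strict):

  %res = call float @llvm.experimental.constrained.fmuladd.f32(
             float %a, float %b, float %c,
             metadata !"round.tonearest", metadata !"fpexcept.strict")
             #0 [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]

  attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }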
---
 clang/test/CodeGen/X86/strictfp_builtins.c    |  6 +-
 clang/test/CodeGen/strictfp_builtins.c        | 34 ++++----
 .../cl20-device-side-enqueue-attributes.cl    |  4 +-
 llvm/docs/LangRef.rst                         | 23 ++++++
 llvm/include/llvm/ADT/FloatingPointMode.h     |  9 +++
 llvm/include/llvm/IR/AutoUpgrade.h            |  2 +
 llvm/include/llvm/IR/FPEnv.h                  |  9 +++
 llvm/include/llvm/IR/IRBuilder.h              | 48 ++++++++++++
 llvm/include/llvm/IR/InstrTypes.h             |  7 ++
 llvm/include/llvm/IR/IntrinsicInst.h          |  2 -
 llvm/include/llvm/IR/LLVMContext.h            |  2 +
 llvm/lib/AsmParser/LLParser.cpp               | 46 +++++++++++
 llvm/lib/IR/AutoUpgrade.cpp                   | 78 +++++++++++++++++++
 llvm/lib/IR/IRBuilder.cpp                     | 56 ++++++++++---
 llvm/lib/IR/Instructions.cpp                  | 18 +++++
 llvm/lib/IR/IntrinsicInst.cpp                 | 23 ------
 llvm/lib/IR/LLVMContext.cpp                   | 10 +++
 llvm/lib/IR/Verifier.cpp                      | 70 ++++++++++++++++-
 .../Scalar/TailRecursionElimination.cpp       | 10 ++-
 llvm/lib/Transforms/Utils/CloneFunction.cpp   | 16 +++-
 .../Bitcode/operand-bundles-bc-analyzer.ll    |  2 +
 .../AMDGPU/amdgpu-simplify-libcall-pown.ll    | 10 +--
 22 files changed, 413 insertions(+), 72 deletions(-)

diff --git a/clang/test/CodeGen/X86/strictfp_builtins.c b/clang/test/CodeGen/X86/strictfp_builtins.c
index 43e4060bef259b..75ed3a2555b3d7 100644
--- a/clang/test/CodeGen/X86/strictfp_builtins.c
+++ b/clang/test/CodeGen/X86/strictfp_builtins.c
@@ -27,7 +27,7 @@ void p(char *str, int x) {
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT:    store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR4:[0-9]+]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.1, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
@@ -43,7 +43,7 @@ void test_long_double_isinf(long double ld) {
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT:    store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
@@ -59,7 +59,7 @@ void test_long_double_isfinite(long double ld) {
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
 // CHECK-NEXT:    store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 58815c7de4fa94..2e758115779711 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -31,21 +31,21 @@ void p(char *str, int x) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT:    br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
 // CHECK:       fpclassify_end:
 // CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
 // CHECK-NEXT:    call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
 // CHECK:       fpclassify_not_zero:
-// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT:    br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
 // CHECK:       fpclassify_not_nan:
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT:    br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
 // CHECK:       fpclassify_not_inf:
-// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT:    [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
 // CHECK-NEXT:    br label [[FPCLASSIFY_END]]
 //
@@ -60,7 +60,7 @@ void test_fpclassify(double d) {
 // CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT:    store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT:    [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -76,7 +76,7 @@ void test_fp16_isinf(_Float16 h) {
 // CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT:    store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -92,7 +92,7 @@ void test_float_isinf(float f) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -108,7 +108,7 @@ void test_double_isinf(double d) {
 // CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT:    store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT:    [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -124,7 +124,7 @@ void test_fp16_isfinite(_Float16 h) {
 // CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT:    store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -140,7 +140,7 @@ void test_float_isfinite(float f) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -156,8 +156,8 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
 // CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
@@ -176,7 +176,7 @@ void test_isinf_sign(double d) {
 // CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT:    store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT:    [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -192,7 +192,7 @@ void test_fp16_isnan(_Float16 h) {
 // CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT:    store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -208,7 +208,7 @@ void test_float_isnan(float f) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
@@ -224,7 +224,7 @@ void test_double_isnan(double d) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT:    call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 451d30b4d86f0e..4d931b0e105133 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
 // STRICTFP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
 // STRICTFP-NEXT:    [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT:    [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]]
+// STRICTFP-NEXT:    [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]
 // STRICTFP-NEXT:    [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
 // STRICTFP-NEXT:    [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
 // STRICTFP-NEXT:    [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
@@ -173,7 +173,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP: attributes #[[ATTR2]] = { convergent noinline nounwind optnone strictfp "stack-protector-buffer-size"="8" }
 // STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
 // STRICTFP: attributes #[[ATTR4]] = { convergent nounwind "stack-protector-buffer-size"="8" }
-// STRICTFP: attributes #[[ATTR5]] = { strictfp }
+// STRICTFP: attributes #[[ATTR5]] = { strictfp memory(inaccessiblemem: readwrite) }
 //.
 // SPIR32: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
 // SPIR32: [[META1:![0-9]+]] = !{i32 2, i32 0}
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index abfd2fdfb9de71..d2c228f59a18b6 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3005,6 +3005,29 @@ A "convergencectrl" operand bundle is only valid on a ``convergent`` operation.
 When present, the operand bundle must contain exactly one value of token type.
 See the :doc:`ConvergentOperations` document for details.
 
+.. _ob_fpe:
+
+Floating-point Environment Operand Bundles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These operand bundles provide details on how the operation interacts with the
+:ref:`floating-point environment <floatenv>`. There are two kinds of such
+operand bundles, which characterize interaction with floating-point control
+modes and status bits.
+
+An operand bundle tagged with "fpe.round" may be associated with the operations
+that may depend on rounding mode. It has an integer value, which represents
+the rounding mode with the same encoding as ``llvm::RoundingMode`` uses. If it
+is present and is not equal to ``llvm::Dynamic``, it specifies the rounding
+mode, which will be used for the operation evaluation. The value
+``llvm::RoundingMode`` indicates that the rounding mode used by the operation is
+specified in a floating-point control register.
+
+An operand bundle tagged with "fpe.except" may be associated with the operations
+that may read or write floating-point exception flags. It has the same meaning
+and encoding as the corresponding argument in
+:ref:`constrained intrinsics <_constrainedfp>`.
+
 .. _moduleasm:
 
 Module-Level Inline Assembly
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h
index 639d931ef88fec..970cc89093924b 100644
--- a/llvm/include/llvm/ADT/FloatingPointMode.h
+++ b/llvm/include/llvm/ADT/FloatingPointMode.h
@@ -47,6 +47,15 @@ enum class RoundingMode : int8_t {
   Invalid = -1    ///< Denotes invalid value.
 };
 
+inline bool isValidRoundingMode(int X) {
+  return X >= 0 && X <= static_cast<int>(RoundingMode::Dynamic);
+}
+
+inline RoundingMode castToRoundingMode(int X) {
+  assert(isValidRoundingMode(X));
+  return static_cast<RoundingMode>(X);
+}
+
 /// Returns text representation of the given rounding mode.
 inline StringRef spell(RoundingMode RM) {
   switch (RM) {
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 97c3e4d7589d7b..8bd005d73fba36 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -107,6 +107,8 @@ namespace llvm {
   /// Upgrade operand bundles (without knowing about their user instruction).
   void UpgradeOperandBundles(std::vector<OperandBundleDef> &OperandBundles);
 
+  CallBase *upgradeConstrainedFunctionCall(CallBase *CB);
+
 } // End llvm namespace
 
 #endif
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index a0197377759daf..e4602bab6038e0 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -43,6 +43,15 @@ enum ExceptionBehavior : uint8_t {
 
 }
 
+inline bool isValidExceptionBehavior(unsigned X) {
+  return X <= fp::ExceptionBehavior::ebStrict;
+}
+
+inline fp::ExceptionBehavior castToExceptionBehavior(unsigned X) {
+  assert(isValidExceptionBehavior(X));
+  return static_cast<fp::ExceptionBehavior>(X);
+}
+
 /// Returns a valid RoundingMode enumerator when given a string
 /// that is valid as input in constrained intrinsic rounding mode
 /// metadata.
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 23fd8350a29b3d..ca732f4903ce44 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -357,6 +357,9 @@ class IRBuilderBase {
 
   void setConstrainedFPCallAttr(CallBase *I) {
     I->addFnAttr(Attribute::StrictFP);
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(getContext(), ME);
+    I->addFnAttr(A);
   }
 
   void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
@@ -975,6 +978,16 @@ class IRBuilderBase {
                             Instruction *FMFSource = nullptr,
                             const Twine &Name = "");
 
+  /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types,
+  /// and with operand bundles \p OpBundles.
+  /// If \p FMFSource is provided, copy fast-math-flags from that instruction
+  /// to the intrinsic.
+  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
+                            ArrayRef<Value *> Args,
+                            ArrayRef<OperandBundleDef> OpBundles,
+                            Instruction *FMFSource = nullptr,
+                            const Twine &Name = "");
+
   /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
   /// \p FMFSource is provided, copy fast-math-flags from that instruction to
   /// the intrinsic.
@@ -1311,6 +1324,15 @@ class IRBuilderBase {
     return I;
   }
 
+  RoundingMode
+  getEffectiveRounding(std::optional<RoundingMode> Rounding = std::nullopt) {
+    RoundingMode RM = DefaultConstrainedRounding;
+
+    if (Rounding)
+      RM = *Rounding;
+    return RM;
+  }
+
   Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
     RoundingMode UseRounding = DefaultConstrainedRounding;
 
@@ -1325,6 +1347,14 @@ class IRBuilderBase {
     return MetadataAsValue::get(Context, RoundingMDS);
   }
 
+  fp::ExceptionBehavior getEffectiveExceptionBehavior(
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
+    fp::ExceptionBehavior EB = DefaultConstrainedExcept;
+    if (Except)
+      EB = *Except;
+    return EB;
+  }
+
   Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
     std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
         Except.value_or(DefaultConstrainedExcept));
@@ -2475,6 +2505,10 @@ class IRBuilderBase {
       Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
       std::optional<RoundingMode> Rounding = std::nullopt,
       std::optional<fp::ExceptionBehavior> Except = std::nullopt);
+  CallInst *CreateConstrainedFPCall(
+      Intrinsic::ID ID, ArrayRef<Value *> Args, const Twine &Name = "",
+      std::optional<RoundingMode> Rounding = std::nullopt,
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
 
   Value *CreateSelect(Value *C, Value *True, Value *False,
                       const Twine &Name = "", Instruction *MDFrom = nullptr);
@@ -2671,6 +2705,20 @@ class IRBuilderBase {
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       Value *Alignment,
                                       Value *OffsetValue = nullptr);
+
+  void
+  createFPRoundingBundle(SmallVectorImpl<OperandBundleDef> &Bundles,
+                         std::optional<RoundingMode> Rounding = std::nullopt) {
+    int RM = static_cast<int32_t>(getEffectiveRounding(Rounding));
+    Bundles.emplace_back("fpe.round", getInt32(RM));
+  }
+
+  void createFPExceptionBundle(
+      SmallVectorImpl<OperandBundleDef> &Bundles,
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
+    int EB = getEffectiveExceptionBehavior(Except);
+    Bundles.emplace_back("fpe.except", getInt32(EB));
+  }
 };
 
 /// This provides a uniform API for creating instructions and inserting
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index e6332a16df7d5f..b2a20a231182ad 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -25,6 +25,7 @@
 #include "llvm/IR/CallingConv.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/FMF.h"
+#include "llvm/IR/FPEnv.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/LLVMContext.h"
@@ -2131,6 +2132,12 @@ class CallBase : public Instruction {
     return false;
   }
 
+  /// Return the rounding mode specified by operand bundles, if present.
+  std::optional<RoundingMode> getRoundingMode() const;
+
+  /// Return the exception behavior specified by operand bundles, if present.
+  std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
+
   /// Used to keep track of an operand bundle.  See the main comment on
   /// OperandBundleUser above.
   struct BundleOpInfo {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 3436216d478e38..b5134a9ee900c5 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -723,8 +723,6 @@ class VPBinOpIntrinsic : public VPIntrinsic {
 class ConstrainedFPIntrinsic : public IntrinsicInst {
 public:
   unsigned getNonMetadataArgCount() const;
-  std::optional<RoundingMode> getRoundingMode() const;
-  std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
   bool isDefaultFPEnvironment() const;
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index 6d4a59ba6b1f6c..71d99fd2cbff21 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -96,6 +96,8 @@ class LLVMContext {
     OB_ptrauth = 7,                // "ptrauth"
     OB_kcfi = 8,                   // "kcfi"
     OB_convergencectrl = 9,        // "convergencectrl"
+    OB_fpe_round = 10,             // "fpe.round"
+    OB_fpe_except = 11,            // "fpe.except"
   };
 
   /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index dd72d46f5d9aad..42d55b85819782 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6317,6 +6317,50 @@ bool isOldDbgFormatIntrinsic(StringRef Name) {
          FnID == Intrinsic::dbg_assign;
 }
 
+bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
+                                SmallVectorImpl<OperandBundleDef> &Bundles,
+                                LLVMContext &C) {
+  if (Args.empty())
+    return false;
+  if (!Name.starts_with("llvm.experimental.constrained."))
+    return false;
+  for (auto &B : Bundles) {
+    if (B.getTag().starts_with("fpe."))
+      return false;
+  }
+
+  const auto getMetadataArgumentValue = [](Value *Arg) -> StringRef {
+    if (auto *MAV = dyn_cast<MetadataAsValue>(Arg)) {
+      if (const auto *MD = MAV->getMetadata()) {
+        if (auto MDStr = dyn_cast<MDString>(MD))
+          return MDStr->getString();
+      }
+    }
+    return StringRef();
+  };
+
+  if (Args.size() > 1) {
+    Value *V = Args.take_back(2).front();
+    if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
+      if (auto RM = convertStrToRoundingMode(VStr)) {
+        int RMVal = static_cast<int>(*RM);
+        Bundles.emplace_back("fpe.round",
+                             ConstantInt::get(Type::getInt32Ty(C), RMVal));
+      }
+    }
+  }
+
+  Value *V = Args.back();
+  if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
+    if (auto EB = convertStrToExceptionBehavior(VStr)) {
+      Bundles.emplace_back("fpe.except",
+                           ConstantInt::get(Type::getInt32Ty(C), *EB));
+    }
+  }
+
+  return true;
+}
+
 /// FunctionHeader
 ///   ::= OptionalLinkage OptionalPreemptionSpecifier OptionalVisibility
 ///       OptionalCallingConv OptRetAttrs OptUnnamedAddr Type GlobalName
@@ -8084,6 +8128,8 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
       AttributeList::get(Context, AttributeSet::get(Context, FnAttrs),
                          AttributeSet::get(Context, RetAttrs), Attrs);
 
+  updateConstrainedIntrinsic(CalleeID.StrVal, Args, BundleList, Context);
+
   CallInst *CI = CallInst::Create(Ty, Callee, Args, BundleList);
   CI->setTailCallKind(TCK);
   CI->setCallingConv(CC);
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index e73538da282e99..20fc14b6e8ef1c 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4324,6 +4324,64 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
   CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
 }
 
+static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
+                                                 IRBuilder<> &Builder) {
+  if (CB->getOperandBundle(LLVMContext::OB_fpe_round))
+    return nullptr;
+
+  auto *CFPI = cast<ConstrainedFPIntrinsic>(CB);
+  SmallVector<OperandBundleDef, 2> NewBundles;
+  LLVMContext &C = CB->getContext();
+
+  auto RM = CFPI->getRoundingMode();
+  if (RM) {
+    auto CurrentRM = CB->getRoundingMode();
+    if (CurrentRM) {
+      assert(*RM == *CurrentRM);
+    } else {
+      int RMValue = static_cast<int>(*RM);
+      NewBundles.emplace_back("fpe.round",
+                              ConstantInt::get(Type::getInt32Ty(C), RMValue));
+    }
+  }
+
+  auto EB = CFPI->getExceptionBehavior();
+  if (EB) {
+    auto CurrentEB = CB->getExceptionBehavior();
+    if (CurrentEB) {
+      assert(*EB == *CurrentEB);
+    } else {
+      NewBundles.emplace_back("fpe.except",
+                              ConstantInt::get(Type::getInt32Ty(C), *EB));
+    }
+  }
+
+  CallInst *NewCB = nullptr;
+  if (!NewBundles.empty()) {
+    SmallVector<Value *, 4> Args(CB->args());
+    SmallVector<OperandBundleDef, 2> Bundles;
+    CB->getOperandBundlesAsDefs(Bundles);
+    Bundles.append(NewBundles);
+
+    Builder.SetInsertPoint(CB->getParent(), CB->getIterator());
+    MDNode *FPMath = CB->getMetadata(LLVMContext::MD_fpmath);
+    NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName(), FPMath);
+
+    NewCB->copyMetadata(*CB);
+    AttributeList Attrs = CB->getAttributes();
+    NewCB->setAttributes(Attrs);
+    if (isa<FPMathOperator>(CB)) {
+      FastMathFlags FMF = CB->getFastMathFlags();
+      NewCB->setFastMathFlags(FMF);
+    }
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(C, ME);
+    NewCB->addFnAttr(A);
+  }
+
+  return NewCB;
+}
+
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
 void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
@@ -4352,6 +4410,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
     bool IsARM = Name.consume_front("arm.");
     bool IsAMDGCN = Name.consume_front("amdgcn.");
     bool IsDbg = Name.consume_front("dbg.");
+    bool IsConstrained = Name.starts_with("experimental.constrained.");
     Value *Rep = nullptr;
 
     if (!IsX86 && Name == "stackprotectorcheck") {
@@ -4380,6 +4439,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       } else {
         upgradeDbgIntrinsicToDbgRecord(Name, CI);
       }
+    } else if (IsConstrained) {
+      Rep = upgradeConstrainedIntrinsicCall(CI, F, Builder);
     } else {
       llvm_unreachable("Unknown function for CallBase upgrade.");
     }
@@ -5643,3 +5704,20 @@ void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
            OBD.inputs().empty();
   });
 }
+
+CallBase *llvm::upgradeConstrainedFunctionCall(CallBase *CB) {
+  Function *F = dyn_cast<Function>(CB->getCalledOperand());
+  if (!F)
+    return nullptr;
+
+  if (CB->getNumOperands() < 1)
+    return nullptr;
+
+  StringRef Name = F->getName();
+  if (!Name.starts_with("llvm.experimental.constrained."))
+    return nullptr;
+
+  LLVMContext &C = CB->getContext();
+  IRBuilder<> Builder(C);
+  return upgradeConstrainedIntrinsicCall(CB, F, Builder);
+}
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index f340f7aafdc76f..a22e833483563e 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -897,6 +897,17 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
   return createCallHelper(Fn, Args, Name, FMFSource);
 }
 
+CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
+                                         ArrayRef<Type *> Types,
+                                         ArrayRef<Value *> Args,
+                                         ArrayRef<OperandBundleDef> OpBundles,
+                                         Instruction *FMFSource,
+                                         const Twine &Name) {
+  Module *M = BB->getModule();
+  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+  return createCallHelper(Fn, Args, Name, FMFSource, OpBundles);
+}
+
 CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                          ArrayRef<Value *> Args,
                                          Instruction *FMFSource,
@@ -936,8 +947,11 @@ CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
   if (FMFSource)
     UseFMF = FMFSource->getFastMathFlags();
 
-  CallInst *C = CreateIntrinsic(ID, {L->getType()},
-                                {L, R, RoundingV, ExceptV}, nullptr, Name);
+  SmallVector<OperandBundleDef, 2> OpBundles;
+  createFPRoundingBundle(OpBundles, Rounding);
+  createFPExceptionBundle(OpBundles, Except);
+  CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, RoundingV, ExceptV},
+                                OpBundles, nullptr, Name);
   setConstrainedFPCallAttr(C);
   setFPAttrs(C, FPMathTag, UseFMF);
   return C;
@@ -953,8 +967,12 @@ CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
   if (FMFSource)
     UseFMF = FMFSource->getFastMathFlags();
 
-  CallInst *C =
-      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
+  SmallVector<OperandBundleDef, 2> OpBundles;
+  int EB = getEffectiveExceptionBehavior(Except);
+  OpBundles.emplace_back("fpe.except", getInt32(EB));
+
+  CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, OpBundles,
+                                nullptr, Name);
   setConstrainedFPCallAttr(C);
   setFPAttrs(C, FPMathTag, UseFMF);
   return C;
@@ -981,19 +999,24 @@ CallInst *IRBuilderBase::CreateConstrainedFPCast(
     std::optional<RoundingMode> Rounding,
     std::optional<fp::ExceptionBehavior> Except) {
   Value *ExceptV = getConstrainedFPExcept(Except);
+  bool HasRounding = Intrinsic::hasConstrainedFPRoundingModeOperand(ID);
 
   FastMathFlags UseFMF = FMF;
   if (FMFSource)
     UseFMF = FMFSource->getFastMathFlags();
 
+  SmallVector<OperandBundleDef, 2> OpBundles;
+  createFPRoundingBundle(OpBundles, Rounding);
+  createFPExceptionBundle(OpBundles, Except);
+
   CallInst *C;
-  if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
+  if (HasRounding) {
     Value *RoundingV = getConstrainedFPRounding(Rounding);
     C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
-                        nullptr, Name);
+                        OpBundles, nullptr, Name);
   } else
-    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
-                        Name);
+    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, OpBundles,
+                        nullptr, Name);
 
   setConstrainedFPCallAttr(C);
 
@@ -1022,8 +1045,12 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp(
   Value *PredicateV = getConstrainedFPPredicate(P);
   Value *ExceptV = getConstrainedFPExcept(Except);
 
-  CallInst *C = CreateIntrinsic(ID, {L->getType()},
-                                {L, R, PredicateV, ExceptV}, nullptr, Name);
+  SmallVector<OperandBundleDef, 1> OpBundles;
+  int EB = getEffectiveExceptionBehavior(Except);
+  OpBundles.emplace_back("fpe.except", getInt32(EB));
+
+  CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, PredicateV, ExceptV},
+                                OpBundles, nullptr, Name);
   setConstrainedFPCallAttr(C);
   return C;
 }
@@ -1033,14 +1060,19 @@ CallInst *IRBuilderBase::CreateConstrainedFPCall(
     std::optional<RoundingMode> Rounding,
     std::optional<fp::ExceptionBehavior> Except) {
   llvm::SmallVector<Value *, 6> UseArgs;
+  SmallVector<OperandBundleDef, 2> OpBundles;
 
   append_range(UseArgs, Args);
 
-  if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
+  if (Intrinsic::hasConstrainedFPRoundingModeOperand(
+          Callee->getIntrinsicID())) {
     UseArgs.push_back(getConstrainedFPRounding(Rounding));
+    createFPRoundingBundle(OpBundles, Rounding);
+  }
   UseArgs.push_back(getConstrainedFPExcept(Except));
+  createFPExceptionBundle(OpBundles, Except);
 
-  CallInst *C = CreateCall(Callee, UseArgs, Name);
+  CallInst *C = CreateCall(Callee, UseArgs, OpBundles, Name);
   setConstrainedFPCallAttr(C);
   return C;
 }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 065ce3a0172837..f6b093fb9a86cc 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -604,6 +604,24 @@ bool CallBase::hasClobberingOperandBundles() const {
          getIntrinsicID() != Intrinsic::assume;
 }
 
+std::optional<RoundingMode> CallBase::getRoundingMode() const {
+  if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_round)) {
+    uint64_t RM =
+        cast<ConstantInt>(RoundingBundle->Inputs.front())->getSExtValue();
+    return castToRoundingMode(RM);
+  }
+  return std::nullopt;
+}
+
+std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
+  if (auto ExceptionBundle = getOperandBundle(LLVMContext::OB_fpe_except)) {
+    uint64_t EB =
+        cast<ConstantInt>(ExceptionBundle->Inputs.front())->getZExtValue();
+    return castToExceptionBehavior(EB);
+  }
+  return std::nullopt;
+}
+
 MemoryEffects CallBase::getMemoryEffects() const {
   MemoryEffects ME = getAttributes().getMemoryEffects();
   if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 002bab8e079e50..abd00682b032b7 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -273,29 +273,6 @@ void InstrProfCallsite::setCallee(Value *Callee) {
   setArgOperand(4, Callee);
 }
 
-std::optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
-  unsigned NumOperands = arg_size();
-  Metadata *MD = nullptr;
-  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
-  if (MAV)
-    MD = MAV->getMetadata();
-  if (!MD || !isa<MDString>(MD))
-    return std::nullopt;
-  return convertStrToRoundingMode(cast<MDString>(MD)->getString());
-}
-
-std::optional<fp::ExceptionBehavior>
-ConstrainedFPIntrinsic::getExceptionBehavior() const {
-  unsigned NumOperands = arg_size();
-  Metadata *MD = nullptr;
-  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
-  if (MAV)
-    MD = MAV->getMetadata();
-  if (!MD || !isa<MDString>(MD))
-    return std::nullopt;
-  return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
-}
-
 bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
   std::optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
   if (Except) {
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index e078527b597b44..c354adc3136773 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -97,6 +97,16 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
          "convergencectrl operand bundle id drifted!");
   (void)ConvergenceCtrlEntry;
 
+  auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.round");
+  assert(RoundingEntry->second == LLVMContext::OB_fpe_round &&
+         "fpe.round operand bundle id drifted!");
+  (void)RoundingEntry;
+
+  auto *ExceptionEntry = pImpl->getOrInsertBundleTag("fpe.except");
+  assert(ExceptionEntry->second == LLVMContext::OB_fpe_except &&
+         "fpe.except operand bundle id drifted!");
+  (void)ExceptionEntry;
+
   SyncScope::ID SingleThreadSSID =
       pImpl->getOrInsertSyncScopeID("singlethread");
   assert(SingleThreadSSID == SyncScope::SingleThread &&
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 55de486e90e190..090f71ee8e059f 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -658,6 +658,9 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
 
   /// Verify the llvm.experimental.noalias.scope.decl declarations
   void verifyNoAliasScopeDecl();
+
+  /// Verify a call to a constrained intrinsic.
+  void verifyConstrainedIntrinsicCall(const CallBase &CB);
 };
 
 } // end anonymous namespace
@@ -3718,7 +3721,9 @@ void Verifier::visitCallBase(CallBase &Call) {
        FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
        FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
        FoundPtrauthBundle = false, FoundKCFIBundle = false,
-       FoundAttachedCallBundle = false;
+       FoundAttachedCallBundle = false, FoundFpeRoundBundle = false,
+       FoundFpeExceptBundle = false;
+
   for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
     OperandBundleUse BU = Call.getOperandBundleAt(i);
     uint32_t Tag = BU.getTagID();
@@ -3781,9 +3786,31 @@ void Verifier::visitCallBase(CallBase &Call) {
             "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
       FoundAttachedCallBundle = true;
       verifyAttachedCallBundle(Call, BU);
+    } else if (Tag == LLVMContext::OB_fpe_round) {
+      Check(!FoundFpeRoundBundle, "Multiple fpe.round operand bundles", Call);
+      Check(BU.Inputs.size() == 1,
+            "Expected exactly one fpe.round bundle operand", Call);
+      auto RM = dyn_cast<ConstantInt>(BU.Inputs.front());
+      Check(RM, "Value of fpe.round bundle operand must be an integer", Call);
+      Check(isValidRoundingMode(RM->getSExtValue()),
+            "Invalid value of fpe.round bundle operand", Call);
+      FoundFpeRoundBundle = true;
+    } else if (Tag == LLVMContext::OB_fpe_except) {
+      Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call);
+      Check(BU.Inputs.size() == 1,
+            "Expected exactly one fpe.except bundle operand", Call);
+      auto EB = dyn_cast<ConstantInt>(BU.Inputs.front());
+      Check(EB, "Value of fpe.except bundle operand must be an integer", Call);
+      Check(isValidExceptionBehavior(EB->getZExtValue()),
+            "Invalid value of fpe.except bundle operand", Call);
+      FoundFpeExceptBundle = true;
     }
   }
 
+  // Verify that FP options specified in constrained intrinsic arguments agree
+  // with the options specified in operand bundles.
+  verifyConstrainedIntrinsicCall(Call);
+
   // Verify that callee and callsite agree on whether to use pointer auth.
   Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
         "Direct call cannot have a ptrauth bundle", Call);
@@ -3810,6 +3837,47 @@ void Verifier::visitCallBase(CallBase &Call) {
   visitInstruction(Call);
 }
 
+void Verifier::verifyConstrainedIntrinsicCall(const CallBase &CB) {
+  const auto *CFPI = dyn_cast<ConstrainedFPIntrinsic>(&CB);
+  if (!CFPI)
+    return;
+
+  // FP metadata arguments must not conflict with the corresponding
+  // operand bundles.
+  if (std::optional<RoundingMode> RM = CFPI->getRoundingMode()) {
+    RoundingMode Rounding = *RM;
+    auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_round);
+    Check(RoundingBundle,
+          "Constrained intrinsic has a rounding argument but the call does not",
+          CB);
+    if (RoundingBundle) {
+      OperandBundleUse OBU = *RoundingBundle;
+      uint64_t BundleRM = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
+      Check(BundleRM == static_cast<uint64_t>(Rounding),
+            "Rounding mode of the constrained intrinsic differs from that in "
+            "operand bundle",
+            CB);
+    }
+  }
+
+  if (std::optional<fp::ExceptionBehavior> EB = CFPI->getExceptionBehavior()) {
+    fp::ExceptionBehavior Excepts = *EB;
+    auto ExceptionBundle = CB.getOperandBundle(LLVMContext::OB_fpe_except);
+    Check(ExceptionBundle,
+          "Constrained intrinsic has an exception handling argument but the "
+          "call does not",
+          CB);
+    if (ExceptionBundle) {
+      OperandBundleUse OBU = *ExceptionBundle;
+      uint64_t BundleEB = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
+      Check(BundleEB == static_cast<uint64_t>(Excepts),
+            "Exception behavior of the constrained intrinsic differs from that "
+            "in operand bundle",
+            CB);
+    }
+  }
+}
+
 void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
                                          StringRef Context) {
   Check(!Attrs.contains(Attribute::InAlloca),
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 53e486f3dc6cda..478691d031150f 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -251,10 +251,12 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
 
       // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
       // "kcfi".
-      bool IsNoTail = CI->isNoTailCall() ||
-                      CI->hasOperandBundlesOtherThan(
-                          {LLVMContext::OB_clang_arc_attachedcall,
-                           LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi});
+      bool IsNoTail =
+          CI->isNoTailCall() ||
+          CI->hasOperandBundlesOtherThan(
+              {LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth,
+               LLVMContext::OB_kcfi, LLVMContext::OB_fpe_round,
+               LLVMContext::OB_fpe_except});
 
       if (!IsNoTail && CI->doesNotAccessMemory()) {
         // A call to a readnone function whose arguments are all things computed
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index cb6a4e34c226e5..2ef62eae5a4b45 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -472,13 +472,23 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
       // The last arguments of a constrained intrinsic are metadata that
       // represent rounding mode (absents in some intrinsics) and exception
       // behavior. The inlined function uses default settings.
-      if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID))
+      SmallVector<OperandBundleDef, 2> Bundles;
+      if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) {
         Args.push_back(
             MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
+        Bundles.emplace_back(
+            "fpe.round",
+            ConstantInt::get(
+                Type::getInt32Ty(Ctx),
+                static_cast<int>(RoundingMode::NearestTiesToEven)));
+      }
       Args.push_back(
           MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
-
-      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
+      Bundles.emplace_back("fpe.except",
+                           ConstantInt::get(Type::getInt32Ty(Ctx),
+                                            fp::ExceptionBehavior::ebIgnore));
+      NewInst =
+          CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
     }
   }
   if (!NewInst)
diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
index d860104b9cb3d9..01e5b3f6673ae5 100644
--- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
+++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
@@ -13,6 +13,8 @@
 ; CHECK-NEXT:    <OPERAND_BUNDLE_TAG
 ; CHECK-NEXT:    <OPERAND_BUNDLE_TAG
 ; CHECK-NEXT:    <OPERAND_BUNDLE_TAG
+; CHECK-NEXT:    <OPERAND_BUNDLE_TAG
+; CHECK-NEXT:    <OPERAND_BUNDLE_TAG
 ; CHECK-NEXT:  </OPERAND_BUNDLE_TAGS_BLOCK
 
 ; CHECK:   <FUNCTION_BLOCK
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index f9c359bc114ed3..3e4690ec3640f2 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -819,11 +819,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
 ; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
 ; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]]
-; CHECK-NEXT:    [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]]
-; CHECK-NEXT:    [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]]
+; CHECK-NEXT:    [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]]
+; CHECK-NEXT:    [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT:    [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT:    [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]]
 ; CHECK-NEXT:    [[__YEVEN:%.*]] = shl i32 [[Y]], 31
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[X]] to i32
 ; CHECK-NEXT:    [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]]

From 2b2af2bde63a38759bc714aff9cf306f72d09fb7 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 25 Sep 2024 17:25:00 +0700
Subject: [PATCH 2/6] Use metadata for bundle values
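
Operand bundle values become metadata strings instead of integer
constants, and the rounding bundle tag becomes "fpe.control", as the
updated tests show. A sketch of the resulting form:

  %res = call float @llvm.experimental.constrained.fmuladd.f32(
             float %a, float %b, float %c,
             metadata !"round.tonearest", metadata !"fpexcept.strict")
             #0 [ "fpe.control"(metadata !"rte"),
                  "fpe.except"(metadata !"strict") ]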

---
 clang/test/CodeGen/strictfp_builtins.c        |  10 +-
 .../cl20-device-side-enqueue-attributes.cl    |   2 +-
 llvm/docs/LangRef.rst                         |  37 +-
 llvm/include/llvm/ADT/FloatingPointMode.h     |   9 -
 llvm/include/llvm/AsmParser/LLParser.h        |   5 +
 llvm/include/llvm/IR/FPEnv.h                  |  21 +-
 llvm/include/llvm/IR/IRBuilder.h              |  49 +--
 llvm/include/llvm/IR/InstrTypes.h             |   7 +
 llvm/include/llvm/IR/IntrinsicInst.h          |   7 +
 llvm/include/llvm/IR/LLVMContext.h            |   2 +-
 llvm/lib/AsmParser/LLParser.cpp               |  48 +--
 llvm/lib/Bitcode/Reader/BitcodeReader.cpp     |   8 +-
 llvm/lib/IR/AutoUpgrade.cpp                   |  56 +--
 llvm/lib/IR/FPEnv.cpp                         |  45 ++-
 llvm/lib/IR/IRBuilder.cpp                     |  61 +++-
 llvm/lib/IR/Instructions.cpp                  |  34 +-
 llvm/lib/IR/IntrinsicInst.cpp                 |  33 ++
 llvm/lib/IR/LLVMContext.cpp                   |   4 +-
 llvm/lib/IR/Verifier.cpp                      |  57 +--
 .../Scalar/TailRecursionElimination.cpp       |   2 +-
 llvm/lib/Transforms/Utils/CloneFunction.cpp   |  22 +-
 llvm/test/Bitcode/auto-upgrade-constrained.ll | 327 ++++++++++++++++++
 .../Bitcode/auto-upgrade-constrained.ll.bc    | Bin 0 -> 8120 bytes
 .../AMDGPU/amdgpu-simplify-libcall-pown.ll    |   4 +-
 llvm/test/Transforms/Attributor/nofpclass.ll  |   4 +-
 .../test/Transforms/Inline/inline-strictfp.ll |  25 +-
 llvm/test/Verifier/fp-intrinsics.ll           |   8 +-
 27 files changed, 657 insertions(+), 230 deletions(-)
 create mode 100644 llvm/test/Bitcode/auto-upgrade-constrained.ll
 create mode 100644 llvm/test/Bitcode/auto-upgrade-constrained.ll.bc

diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 2e758115779711..053265dcc0667f 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -31,21 +31,21 @@ void p(char *str, int x) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT:    br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
 // CHECK:       fpclassify_end:
 // CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
 // CHECK-NEXT:    call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
 // CHECK:       fpclassify_not_zero:
-// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT:    br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
 // CHECK:       fpclassify_not_nan:
 // CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT:    br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
 // CHECK:       fpclassify_not_inf:
-// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT:    [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
 // CHECK-NEXT:    br label [[FPCLASSIFY_END]]
 //
@@ -157,7 +157,7 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
 // CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 4d931b0e105133..31f1aa60780b9e 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
 // STRICTFP-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
 // STRICTFP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
 // STRICTFP-NEXT:    [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT:    [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]
+// STRICTFP-NEXT:    [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 // STRICTFP-NEXT:    [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
 // STRICTFP-NEXT:    [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
 // STRICTFP-NEXT:    [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index d2c228f59a18b6..b091e383be31c6 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3013,19 +3013,36 @@ Floating-point Environment Operand Bundles
 These operand bundles provide details on how the operation interacts with the
 :ref:`floating-point environment <_floatenv>`. There are two kinds of such
 operand bundles, which characterize interaction with floating-point control
-modes and status bits.
+modes and status bits, respectively.
 
-An operand bundle tagged with "fpe.round" may be associated with the operations
-that may depend on rounding mode. It has an integer value, which represents
-the rounding mode with the same encoding as ``llvm::RoundingMode`` uses. If it
-is present and is not equal to ``llvm::Dynamic``, it specifies the rounding
-mode, which will be used for the operation evaluation. The value
-``llvm::RoundingMode`` indicates that the rounding mode used by the operation is
-specified in a floating-point control register.
+An operand bundle tagged with "fpe.control" carries information about the
+control modes used by the operation. Currently only the rounding mode is
+supported. It is represented by a metadata string value, which specifies the
+rounding mode used to evaluate the operation. Possible values are:
+
+::
+
+    "rtz"  - toward zero
+    "rte"  - to nearest, ties to even
+    "rtp"  - toward positive infinity
+    "rtn"  - toward negative infinity
+    "rmm"  - to nearest, ties away from zero
+    "dyn"  - rounding mode is taken from control register
+
+If "fpe.control" is absent, default rounding rounding to nearest, ties to even
+is assumed. 
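+
+For example, the bundle ``"fpe.control"(metadata !"rtz")`` specifies that the
+operation rounds its result toward zero.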
 
 An operand bundle tagged with "fpe.except" may be associated with the operations
-that may read or write floating-point exception flags. It has the same meaning
-and encoding as the corresponding argument in
+that may read or write floating-point exception flags. It has a single metadata
+string value, which may be one of:
+
+::
+
+    "ignore"
+    "strict"
+    "maytrap"
+
+It has the same meaning as the corresponding argument in
 :ref:`constrained intrinsics <_constrainedfp>`.
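+
+For example, the following call specifies dynamic rounding mode and strict
+exception semantics; the bundle operands duplicate the information carried by
+the metadata arguments of the constrained intrinsic:
+
+::
+
+    %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]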
 
 .. _moduleasm:
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h
index 970cc89093924b..639d931ef88fec 100644
--- a/llvm/include/llvm/ADT/FloatingPointMode.h
+++ b/llvm/include/llvm/ADT/FloatingPointMode.h
@@ -47,15 +47,6 @@ enum class RoundingMode : int8_t {
   Invalid = -1    ///< Denotes invalid value.
 };
 
-inline bool isValidRoundingMode(int X) {
-  return X >= 0 && X <= static_cast<int>(RoundingMode::Dynamic);
-}
-
-inline RoundingMode castToRoundingMode(int X) {
-  assert(isValidRoundingMode(X));
-  return static_cast<RoundingMode>(X);
-}
-
 /// Returns text representation of the given rounding mode.
 inline StringRef spell(RoundingMode RM) {
   switch (RM) {
diff --git a/llvm/include/llvm/AsmParser/LLParser.h b/llvm/include/llvm/AsmParser/LLParser.h
index 1ef8b8ffc39660..88d48f7bc5e6a5 100644
--- a/llvm/include/llvm/AsmParser/LLParser.h
+++ b/llvm/include/llvm/AsmParser/LLParser.h
@@ -563,6 +563,11 @@ namespace llvm {
     bool resolveFunctionType(Type *RetType, ArrayRef<ParamInfo> ArgList,
                              FunctionType *&FuncTy);
 
+    void updateConstrainedIntrinsic(ValID &CalleeID,
+                                    SmallVectorImpl<ParamInfo> &Args,
+                                    SmallVectorImpl<OperandBundleDef> &Bundles,
+                                    AttrBuilder &FnAttrs);
+
     // Constant Parsing.
     bool parseValID(ValID &ID, PerFunctionState *PFS,
                     Type *ExpectedTy = nullptr);
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index e4602bab6038e0..58a0c1956598c0 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -43,31 +43,26 @@ enum ExceptionBehavior : uint8_t {
 
 }
 
-inline bool isValidExceptionBehavior(unsigned X) {
-  return X <= fp::ExceptionBehavior::ebStrict;
-}
-
-inline fp::ExceptionBehavior castToExceptionBehavior(unsigned X) {
-  assert(isValidExceptionBehavior(X));
-  return static_cast<fp::ExceptionBehavior>(X);
-}
-
 /// Returns a valid RoundingMode enumerator when given a string
 /// that is valid as input in constrained intrinsic rounding mode
 /// metadata.
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef);
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef,
+                                                     bool InBundle = false);
 
 /// For any RoundingMode enumerator, returns a string valid as input in
 /// constrained intrinsic rounding mode metadata.
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode);
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode,
+                                                  bool InBundle = false);
 
 /// Returns a valid ExceptionBehavior enumerator when given a string
 /// valid as input in constrained intrinsic exception behavior metadata.
-std::optional<fp::ExceptionBehavior> convertStrToExceptionBehavior(StringRef);
+std::optional<fp::ExceptionBehavior>
+convertStrToExceptionBehavior(StringRef, bool InBundle = false);
 
 /// For any ExceptionBehavior enumerator, returns a string valid as
 /// input in constrained intrinsic exception behavior metadata.
-std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior);
+std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior,
+                                                       bool InBundle = false);
 
 /// Returns true if the exception handling behavior and rounding mode
 /// match what is used in the default floating point environment.
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index ca732f4903ce44..b8c79b53d42ae4 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1324,15 +1324,6 @@ class IRBuilderBase {
     return I;
   }
 
-  RoundingMode
-  getEffectiveRounding(std::optional<RoundingMode> Rounding = std::nullopt) {
-    RoundingMode RM = DefaultConstrainedRounding;
-
-    if (Rounding)
-      RM = *Rounding;
-    return RM;
-  }
-
   Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
     RoundingMode UseRounding = DefaultConstrainedRounding;
 
@@ -1347,14 +1338,6 @@ class IRBuilderBase {
     return MetadataAsValue::get(Context, RoundingMDS);
   }
 
-  fp::ExceptionBehavior getEffectiveExceptionBehavior(
-      std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
-    fp::ExceptionBehavior EB = DefaultConstrainedExcept;
-    if (Except)
-      EB = *Except;
-    return EB;
-  }
-
   Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
     std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
         Except.value_or(DefaultConstrainedExcept));
@@ -2469,24 +2452,13 @@ class IRBuilderBase {
   CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                        ArrayRef<Value *> Args = {}, const Twine &Name = "",
                        MDNode *FPMathTag = nullptr) {
-    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
-    if (IsFPConstrained)
-      setConstrainedFPCallAttr(CI);
-    if (isa<FPMathOperator>(CI))
-      setFPAttrs(CI, FPMathTag, FMF);
-    return Insert(CI, Name);
+    return CreateCall(FTy, Callee, Args, DefaultOperandBundles, Name,
+                      FPMathTag);
   }
 
   CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                        ArrayRef<OperandBundleDef> OpBundles,
-                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
-    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
-    if (IsFPConstrained)
-      setConstrainedFPCallAttr(CI);
-    if (isa<FPMathOperator>(CI))
-      setFPAttrs(CI, FPMathTag, FMF);
-    return Insert(CI, Name);
-  }
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr);
 
   CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = {},
                        const Twine &Name = "", MDNode *FPMathTag = nullptr) {
@@ -2505,10 +2477,6 @@ class IRBuilderBase {
       Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
       std::optional<RoundingMode> Rounding = std::nullopt,
       std::optional<fp::ExceptionBehavior> Except = std::nullopt);
-  CallInst *CreateConstrainedFPCall(
-      Intrinsic::ID ID, ArrayRef<Value *> Args, const Twine &Name = "",
-      std::optional<RoundingMode> Rounding = std::nullopt,
-      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
 
   Value *CreateSelect(Value *C, Value *True, Value *False,
                       const Twine &Name = "", Instruction *MDFrom = nullptr);
@@ -2708,17 +2676,10 @@ class IRBuilderBase {
 
   void
   createFPRoundingBundle(SmallVectorImpl<OperandBundleDef> &Bundles,
-                         std::optional<RoundingMode> Rounding = std::nullopt) {
-    int RM = static_cast<int32_t>(getEffectiveRounding(Rounding));
-    Bundles.emplace_back("fpe.round", getInt32(RM));
-  }
-
+                         std::optional<RoundingMode> Rounding = std::nullopt);
   void createFPExceptionBundle(
       SmallVectorImpl<OperandBundleDef> &Bundles,
-      std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
-    int EB = getEffectiveExceptionBehavior(Except);
-    Bundles.emplace_back("fpe.except", getInt32(EB));
-  }
+      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
 };
 
 /// This provides a uniform API for creating instructions and inserting
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index b2a20a231182ad..2cc6c0359bf7ad 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1100,6 +1100,13 @@ template <typename InputTy> class OperandBundleDefT {
 using OperandBundleDef = OperandBundleDefT<Value *>;
 using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
 
+void addFPRoundingBundle(LLVMContext &Ctx,
+                         SmallVectorImpl<OperandBundleDef> &Bundles,
+                         RoundingMode Rounding);
+void addFPExceptionBundle(LLVMContext &Ctx,
+                          SmallVectorImpl<OperandBundleDef> &Bundles,
+                          fp::ExceptionBehavior Except);
+
 //===----------------------------------------------------------------------===//
 //                               CallBase Class
 //===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index b5134a9ee900c5..a248a9612a82d0 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -128,6 +128,10 @@ class IntrinsicInst : public CallInst {
   /// course of IR transformations
   static bool mayLowerToFunctionCall(Intrinsic::ID IID);
 
+  /// Check if the specified intrinsic can read or write the FP environment.
+  /// Constrained intrinsics are not handled by this function.
+  static bool canAccessFPEnvironment(Intrinsic::ID IID);
+
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const CallInst *I) {
     if (const Function *CF = I->getCalledFunction())
@@ -139,6 +143,9 @@ class IntrinsicInst : public CallInst {
   }
 };
 
+std::optional<RoundingMode> getRoundingModeArg(const CallBase &I);
+std::optional<fp::ExceptionBehavior> getExceptionBehaviorArg(const CallBase &I);
+
 /// Check if \p ID corresponds to a lifetime intrinsic.
 static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
   switch (ID) {
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index 71d99fd2cbff21..6d4dbac0bf32eb 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -96,7 +96,7 @@ class LLVMContext {
     OB_ptrauth = 7,                // "ptrauth"
     OB_kcfi = 8,                   // "kcfi"
     OB_convergencectrl = 9,        // "convergencectrl"
-    OB_fpe_round = 10,             // "fpe.round"
+    OB_fpe_control = 10,           // "fpe.control"
     OB_fpe_except = 11,            // "fpe.except"
   };
 
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 42d55b85819782..65ef6c8b291165 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6317,16 +6317,19 @@ bool isOldDbgFormatIntrinsic(StringRef Name) {
          FnID == Intrinsic::dbg_assign;
 }
 
-bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
-                                SmallVectorImpl<OperandBundleDef> &Bundles,
-                                LLVMContext &C) {
+void LLParser::updateConstrainedIntrinsic(
+    ValID &CalleeID, SmallVectorImpl<LLParser::ParamInfo> &Args,
+    SmallVectorImpl<OperandBundleDef> &Bundles, AttrBuilder &FnAttrs) {
   if (Args.empty())
-    return false;
-  if (!Name.starts_with("llvm.experimental.constrained."))
-    return false;
+    return;
+
+  StringRef Name = CalleeID.StrVal;
+  if (!Name.consume_front("llvm.experimental.constrained."))
+    return;
+
   for (auto &B : Bundles) {
     if (B.getTag().starts_with("fpe."))
-      return false;
+      return;
   }
 
   const auto getMetadataArgumentValue = [](Value *Arg) -> StringRef {
@@ -6340,25 +6343,24 @@ bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
   };
 
   if (Args.size() > 1) {
-    Value *V = Args.take_back(2).front();
-    if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
-      if (auto RM = convertStrToRoundingMode(VStr)) {
-        int RMVal = static_cast<int>(*RM);
-        Bundles.emplace_back("fpe.round",
-                             ConstantInt::get(Type::getInt32Ty(C), RMVal));
-      }
+    Value *V = Args[Args.size() - 2].V;
+    StringRef VStr = getMetadataArgumentValue(V);
+    if (!VStr.empty()) {
+      if (auto RM = convertStrToRoundingMode(VStr))
+        addFPRoundingBundle(Context, Bundles, *RM);
     }
   }
 
-  Value *V = Args.back();
-  if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
-    if (auto EB = convertStrToExceptionBehavior(VStr)) {
-      Bundles.emplace_back("fpe.except",
-                           ConstantInt::get(Type::getInt32Ty(C), *EB));
-    }
+  Value *V = Args.back().V;
+  StringRef VStr = getMetadataArgumentValue(V);
+  if (!VStr.empty()) {
+    if (auto EB = convertStrToExceptionBehavior(VStr))
+      addFPExceptionBundle(Context, Bundles, *EB);
   }
 
-  return true;
+  MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+  FnAttrs.addAttribute(Attribute::getWithMemoryEffects(Context, ME));
+  FnAttrs.addAttribute(Attribute::StrictFP);
 }
 
 /// FunctionHeader
@@ -8081,6 +8083,8 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
       parseOptionalOperandBundles(BundleList, PFS))
     return true;
 
+  updateConstrainedIntrinsic(CalleeID, ArgList, BundleList, FnAttrs);
+
   // If RetType is a non-function pointer type, then this is the short syntax
   // for the call, which means that RetType is just the return type.  Infer the
   // rest of the function argument types from the arguments that are present.
@@ -8128,8 +8132,6 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
       AttributeList::get(Context, AttributeSet::get(Context, FnAttrs),
                          AttributeSet::get(Context, RetAttrs), Attrs);
 
-  updateConstrainedIntrinsic(CalleeID.StrVal, Args, BundleList, Context);
-
   CallInst *CI = CallInst::Create(Ty, Callee, Args, BundleList);
   CI->setTailCallKind(TCK);
   CI->setCallingConv(CC);
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index a585a24a022467..02fbb38548d6b9 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -7127,9 +7127,11 @@ Error BitcodeReader::materializeModule() {
       if (CallInst *CI = dyn_cast<CallInst>(U))
         UpgradeIntrinsicCall(CI, I.second);
     }
-    if (!I.first->use_empty())
-      I.first->replaceAllUsesWith(I.second);
-    I.first->eraseFromParent();
+    if (I.second) {
+      if (!I.first->use_empty())
+        I.first->replaceAllUsesWith(I.second);
+      I.first->eraseFromParent();
+    }
   }
   UpgradedIntrinsics.clear();
 
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 20fc14b6e8ef1c..0d04b6634f4351 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1193,6 +1193,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
           F->getParent(), ID, F->getFunctionType()->getReturnType());
       return true;
     }
+    if (Name.starts_with("experimental.constrained."))
+      return true;
     break; // No other 'e*'.
   case 'f':
     if (Name.starts_with("flt.rounds")) {
@@ -4326,34 +4328,24 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
 
 static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
                                                  IRBuilder<> &Builder) {
-  if (CB->getOperandBundle(LLVMContext::OB_fpe_round))
+  if (CB->getOperandBundle(LLVMContext::OB_fpe_control) ||
+      CB->getOperandBundle(LLVMContext::OB_fpe_except))
     return nullptr;
 
-  auto *CFPI = cast<ConstrainedFPIntrinsic>(F);
   SmallVector<OperandBundleDef, 2> NewBundles;
-  LLVMContext &C = CB->getContext();
 
-  auto RM = CFPI->getRoundingMode();
+  auto RM = getRoundingModeArg(*CB);
   if (RM) {
     auto CurrentRM = CB->getRoundingMode();
-    if (CurrentRM) {
-      assert(*RM == *CurrentRM);
-    } else {
-      int RMValue = static_cast<int>(*RM);
-      NewBundles.emplace_back("fpe.round",
-                              ConstantInt::get(Type::getInt32Ty(C), RMValue));
-    }
+    assert(!CurrentRM && "unexpected rounding bundle");
+    Builder.createFPRoundingBundle(NewBundles, RM);
   }
 
-  auto EB = CFPI->getExceptionBehavior();
+  auto EB = getExceptionBehaviorArg(*CB);
   if (EB) {
     auto CurrentEB = CB->getExceptionBehavior();
-    if (CurrentEB) {
-      assert(*EB == *CurrentEB);
-    } else {
-      NewBundles.emplace_back("fpe.except",
-                              ConstantInt::get(Type::getInt32Ty(C), *EB));
-    }
+    assert(!CurrentEB && "unexpected exception bundle");
+    Builder.createFPExceptionBundle(NewBundles, EB);
   }
 
   CallInst *NewCB = nullptr;
@@ -4374,9 +4366,11 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
       FastMathFlags FMF = CB->getFastMathFlags();
       NewCB->setFastMathFlags(FMF);
     }
+
     MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
-    auto A = Attribute::getWithMemoryEffects(C, ME);
+    auto A = Attribute::getWithMemoryEffects(CB->getContext(), ME);
     NewCB->addFnAttr(A);
+    NewCB->addFnAttr(Attribute::StrictFP);
   }
 
   return NewCB;
@@ -4410,7 +4404,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
     bool IsARM = Name.consume_front("arm.");
     bool IsAMDGCN = Name.consume_front("amdgcn.");
     bool IsDbg = Name.consume_front("dbg.");
-    bool IsConstrained = Name.starts_with("experimental.constrained.");
+    bool IsConstrained = Name.consume_front("experimental.constrained.");
     Value *Rep = nullptr;
 
     if (!IsX86 && Name == "stackprotectorcheck") {
@@ -4441,6 +4435,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       }
     } else if (IsConstrained) {
       Rep = upgradeConstrainedIntrinsicCall(CI, F, Builder);
+      if (!Rep)
+        return;
     } else {
       llvm_unreachable("Unknown function for CallBase upgrade.");
     }
@@ -4964,7 +4960,8 @@ void llvm::UpgradeCallsToIntrinsic(Function *F) {
         UpgradeIntrinsicCall(CB, NewFn);
 
     // Remove old function, no longer used, from the module.
-    F->eraseFromParent();
+    if (NewFn)
+      F->eraseFromParent();
   }
 }
 
@@ -5704,20 +5701,3 @@ void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
            OBD.inputs().empty();
   });
 }
-
-CallBase *llvm::upgradeConstrainedFunctionCall(CallBase *CB) {
-  Function *F = dyn_cast<Function>(CB->getCalledOperand());
-  if (!F)
-    return nullptr;
-
-  if (CB->getNumOperands() < 1)
-    return nullptr;
-
-  StringRef Name = F->getName();
-  if (!Name.starts_with("experimental.constrained."))
-    return nullptr;
-
-  LLVMContext &C = CB->getContext();
-  IRBuilder<> Builder(C);
-  return upgradeConstrainedIntrinsicCall(CB, F, Builder);
-}
diff --git a/llvm/lib/IR/FPEnv.cpp b/llvm/lib/IR/FPEnv.cpp
index 67f21d3756e936..91a962eb8190bc 100644
--- a/llvm/lib/IR/FPEnv.cpp
+++ b/llvm/lib/IR/FPEnv.cpp
@@ -21,7 +21,18 @@
 
 namespace llvm {
 
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg,
+                                                     bool InBundle) {
+  if (InBundle)
+    return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
+        .Case("dyn", RoundingMode::Dynamic)
+        .Case("rte", RoundingMode::NearestTiesToEven)
+        .Case("rmm", RoundingMode::NearestTiesToAway)
+        .Case("rtn", RoundingMode::TowardNegative)
+        .Case("rtp", RoundingMode::TowardPositive)
+        .Case("rtz", RoundingMode::TowardZero)
+        .Default(std::nullopt);
+
   // For dynamic rounding mode, we use round to nearest but we will set the
   // 'exact' SDNodeFlag so that the value will not be rounded.
   return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
@@ -34,26 +45,27 @@ std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
       .Default(std::nullopt);
 }
 
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding,
+                                                  bool InBundle) {
   std::optional<StringRef> RoundingStr;
   switch (UseRounding) {
   case RoundingMode::Dynamic:
-    RoundingStr = "round.dynamic";
+    RoundingStr = InBundle ? "dyn" : "round.dynamic";
     break;
   case RoundingMode::NearestTiesToEven:
-    RoundingStr = "round.tonearest";
+    RoundingStr = InBundle ? "rte" : "round.tonearest";
     break;
   case RoundingMode::NearestTiesToAway:
-    RoundingStr = "round.tonearestaway";
+    RoundingStr = InBundle ? "rmm" : "round.tonearestaway";
     break;
   case RoundingMode::TowardNegative:
-    RoundingStr = "round.downward";
+    RoundingStr = InBundle ? "rtn" : "round.downward";
     break;
   case RoundingMode::TowardPositive:
-    RoundingStr = "round.upward";
+    RoundingStr = InBundle ? "rtp" : "round.upward";
     break;
   case RoundingMode::TowardZero:
-    RoundingStr = "round.towardzero";
+    RoundingStr = InBundle ? "rtz" : "round.towardzero";
     break;
   default:
     break;
@@ -62,7 +74,14 @@ std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
 }
 
 std::optional<fp::ExceptionBehavior>
-convertStrToExceptionBehavior(StringRef ExceptionArg) {
+convertStrToExceptionBehavior(StringRef ExceptionArg, bool InBundle) {
+  if (InBundle)
+    return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
+        .Case("ignore", fp::ebIgnore)
+        .Case("maytrap", fp::ebMayTrap)
+        .Case("strict", fp::ebStrict)
+        .Default(std::nullopt);
+
   return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
       .Case("fpexcept.ignore", fp::ebIgnore)
       .Case("fpexcept.maytrap", fp::ebMayTrap)
@@ -71,17 +90,17 @@ convertStrToExceptionBehavior(StringRef ExceptionArg) {
 }
 
 std::optional<StringRef>
-convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) {
+convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept, bool InBundle) {
   std::optional<StringRef> ExceptStr;
   switch (UseExcept) {
   case fp::ebStrict:
-    ExceptStr = "fpexcept.strict";
+    ExceptStr = InBundle ? "strict" : "fpexcept.strict";
     break;
   case fp::ebIgnore:
-    ExceptStr = "fpexcept.ignore";
+    ExceptStr = InBundle ? "ignore" : "fpexcept.ignore";
     break;
   case fp::ebMayTrap:
-    ExceptStr = "fpexcept.maytrap";
+    ExceptStr = InBundle ? "maytrap" : "fpexcept.maytrap";
     break;
   }
   return ExceptStr;
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index a22e833483563e..b4b020f06eec52 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -86,6 +86,43 @@ IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
   return CI;
 }
 
+CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee,
+                                    ArrayRef<Value *> Args,
+                                    ArrayRef<OperandBundleDef> OpBundles,
+                                    const Twine &Name, MDNode *FPMathTag) {
+  ArrayRef<OperandBundleDef> ActualBundlesRef = OpBundles;
+  SmallVector<OperandBundleDef, 2> ActualBundles;
+
+  if (IsFPConstrained) {
+    // In strictfp mode, attach any FP bundles that the caller did not
+    // provide, filling them with the builder's default rounding mode and
+    // exception behavior.
+    if (const auto *Func = dyn_cast<Function>(Callee)) {
+      if (Intrinsic::ID ID = Func->getIntrinsicID()) {
+        if (IntrinsicInst::canAccessFPEnvironment(ID)) {
+          bool NeedRound = true, NeedExcept = true;
+          for (const auto &Item : OpBundles) {
+            if (NeedRound && Item.getTag() == "fpe.control")
+              NeedRound = false;
+            else if (NeedExcept && Item.getTag() == "fpe.except")
+              NeedExcept = false;
+            ActualBundles.push_back(Item);
+          }
+          if (NeedRound && Intrinsic::hasConstrainedFPRoundingModeOperand(ID))
+            createFPRoundingBundle(ActualBundles);
+          if (NeedExcept)
+            createFPExceptionBundle(ActualBundles);
+          ActualBundlesRef = ActualBundles;
+        }
+      }
+    }
+  }
+
+  CallInst *CI = CallInst::Create(FTy, Callee, Args, ActualBundlesRef);
+  if (IsFPConstrained)
+    setConstrainedFPCallAttr(CI);
+  if (isa<FPMathOperator>(CI))
+    setFPAttrs(CI, FPMathTag, FMF);
+  return Insert(CI, Name);
+}
+
 Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
   assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
   if (cast<ConstantInt>(Scaling)->isZero())
@@ -904,7 +941,7 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                          Instruction *FMFSource,
                                          const Twine &Name) {
   Module *M = BB->getModule();
-  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
   return createCallHelper(Fn, Args, Name, FMFSource, OpBundles);
 }
 
@@ -967,9 +1004,8 @@ CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
   if (FMFSource)
     UseFMF = FMFSource->getFastMathFlags();
 
-  SmallVector<OperandBundleDef, 2> OpBundles;
-  int EB = getEffectiveExceptionBehavior(Except);
-  OpBundles.emplace_back("fpe.except", getInt32(EB));
+  SmallVector<OperandBundleDef, 1> OpBundles;
+  createFPExceptionBundle(OpBundles, Except);
 
   CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, OpBundles,
                                 nullptr, Name);
@@ -1046,8 +1082,7 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp(
   Value *ExceptV = getConstrainedFPExcept(Except);
 
   SmallVector<OperandBundleDef, 1> OpBundles;
-  int EB = getEffectiveExceptionBehavior(Except);
-  OpBundles.emplace_back("fpe.except", getInt32(EB));
+  createFPExceptionBundle(OpBundles, Except);
 
   CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, PredicateV, ExceptV},
                                 OpBundles, nullptr, Name);
@@ -1305,6 +1340,20 @@ CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
   return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
 }
 
+void IRBuilderBase::createFPRoundingBundle(
+    SmallVectorImpl<OperandBundleDef> &Bundles,
+    std::optional<RoundingMode> Rounding) {
+  addFPRoundingBundle(Context, Bundles,
+                      Rounding.value_or(DefaultConstrainedRounding));
+}
+
+void IRBuilderBase::createFPExceptionBundle(
+    SmallVectorImpl<OperandBundleDef> &Bundles,
+    std::optional<fp::ExceptionBehavior> Except) {
+  addFPExceptionBundle(Context, Bundles,
+                       Except.value_or(DefaultConstrainedExcept));
+}
+
 IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
 IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
 IRBuilderFolder::~IRBuilderFolder() = default;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index f6b093fb9a86cc..f763a29e90a97f 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -605,19 +605,19 @@ bool CallBase::hasClobberingOperandBundles() const {
 }
 
 std::optional<RoundingMode> CallBase::getRoundingMode() const {
-  if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_round)) {
-    uint64_t RM =
-        cast<ConstantInt>(RoundingBundle->Inputs.front())->getSExtValue();
-    return castToRoundingMode(RM);
+  if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_control)) {
+    Value *V = RoundingBundle->Inputs.front();
+    Metadata *MD = cast<MetadataAsValue>(V)->getMetadata();
+    return convertStrToRoundingMode(cast<MDString>(MD)->getString(), true);
   }
   return std::nullopt;
 }
 
 std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
   if (auto ExceptionBundle = getOperandBundle(LLVMContext::OB_fpe_except)) {
-    uint64_t EB =
-        cast<ConstantInt>(ExceptionBundle->Inputs.front())->getZExtValue();
-    return castToExceptionBehavior(EB);
+    Value *V = ExceptionBundle->Inputs.front();
+    Metadata *MD = cast<MetadataAsValue>(V)->getMetadata();
+    return convertStrToExceptionBehavior(cast<MDString>(MD)->getString(), true);
   }
   return std::nullopt;
 }
@@ -693,6 +693,26 @@ void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
                    MemoryEffects::inaccessibleOrArgMemOnly());
 }
 
+void llvm::addFPRoundingBundle(LLVMContext &Ctx,
+                               SmallVectorImpl<OperandBundleDef> &Bundles,
+                               RoundingMode Rounding) {
+  std::optional<StringRef> RndStr = convertRoundingModeToStr(Rounding, true);
+  assert(RndStr && "Garbage rounding mode!");
+  auto *RoundingMDS = MDString::get(Ctx, *RndStr);
+  auto *RM = MetadataAsValue::get(Ctx, RoundingMDS);
+  Bundles.emplace_back("fpe.control", RM);
+}
+
+void llvm::addFPExceptionBundle(LLVMContext &Ctx,
+                                SmallVectorImpl<OperandBundleDef> &Bundles,
+                                fp::ExceptionBehavior Except) {
+  std::optional<StringRef> ExcStr = convertExceptionBehaviorToStr(Except, true);
+  assert(ExcStr && "Garbage exception behavior!");
+  auto *ExceptMDS = MDString::get(Ctx, *ExcStr);
+  auto *EB = MetadataAsValue::get(Ctx, ExceptMDS);
+  Bundles.emplace_back("fpe.except", EB);
+}
+
 //===----------------------------------------------------------------------===//
 //                        CallInst Implementation
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index abd00682b032b7..a1f8533fe8773b 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -66,6 +66,39 @@ bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
   }
 }
 
+bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
+  switch (IID) {
+#define FUNCTION(NAME, A, R, I) case Intrinsic::NAME:
+#include "llvm/IR/ConstrainedOps.def"
+    return true;
+  default:
+    return false;
+  }
+}
+
+std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
+  unsigned NumOperands = I.arg_size();
+  Metadata *MD = nullptr;
+  auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 2));
+  if (MAV)
+    MD = MAV->getMetadata();
+  if (!MD || !isa<MDString>(MD))
+    return std::nullopt;
+  return convertStrToRoundingMode(cast<MDString>(MD)->getString());
+}
+
+std::optional<fp::ExceptionBehavior>
+llvm::getExceptionBehaviorArg(const CallBase &I) {
+  unsigned NumOperands = I.arg_size();
+  Metadata *MD = nullptr;
+  auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 1));
+  if (MAV)
+    MD = MAV->getMetadata();
+  if (!MD || !isa<MDString>(MD))
+    return std::nullopt;
+  return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
+}
+
 //===----------------------------------------------------------------------===//
 /// DbgVariableIntrinsic - This is the common base class for debug info
 /// intrinsics for variables.
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index c354adc3136773..1e0883641d078a 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -97,8 +97,8 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
          "convergencectrl operand bundle id drifted!");
   (void)ConvergenceCtrlEntry;
 
-  auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.round");
-  assert(RoundingEntry->second == LLVMContext::OB_fpe_round &&
+  auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.control");
+  assert(RoundingEntry->second == LLVMContext::OB_fpe_control &&
          "fpe.round operand bundle id drifted!");
   (void)RoundingEntry;
 
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 090f71ee8e059f..e3fafecacdd2a8 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3786,23 +3786,32 @@ void Verifier::visitCallBase(CallBase &Call) {
             "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
       FoundAttachedCallBundle = true;
       verifyAttachedCallBundle(Call, BU);
-    } else if (Tag == LLVMContext::OB_fpe_round) {
+    } else if (Tag == LLVMContext::OB_fpe_control) {
-      Check(!FoundFpeRoundBundle, "Multiple fpe.round operand bundles", Call);
-      Check(BU.Inputs.size() == 1,
-            "Expected exactly one fpe.round bundle operand", Call);
+      Check(!FoundFpeRoundBundle, "Multiple fpe.control operand bundles", Call);
+      Check(BU.Inputs.size() == 1,
+            "Expected exactly one fpe.control bundle operand", Call);
-      auto RM = dyn_cast<ConstantInt>(BU.Inputs.front());
-      Check(RM, "Value of fpe.round bundle operand must be an integer", Call);
-      Check(isValidRoundingMode(RM->getSExtValue()),
-            "Invalid value of fpe.round bundle operand", Call);
+      auto *V = dyn_cast<MetadataAsValue>(BU.Inputs.front());
+      Check(V, "Value of fpe.control bundle operand must be metadata", Call);
+      auto *MDS = dyn_cast<MDString>(V->getMetadata());
+      Check(MDS, "Value of fpe.control bundle operand must be a string", Call);
+      auto RM = convertStrToRoundingMode(MDS->getString(), true);
+      Check(RM.has_value(),
+            "Value of fpe.control bundle operand is not a valid rounding mode",
+            Call);
       FoundFpeRoundBundle = true;
     } else if (Tag == LLVMContext::OB_fpe_except) {
       Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call);
       Check(BU.Inputs.size() == 1,
             "Expected exactly one fpe.except bundle operand", Call);
-      auto EB = dyn_cast<ConstantInt>(BU.Inputs.front());
-      Check(EB, "Value of fpe.except bundle operand must be an integer", Call);
-      Check(isValidExceptionBehavior(EB->getZExtValue()),
-            "Invalid value of fpe.except bundle operand", Call);
+      auto *V = dyn_cast<MetadataAsValue>(BU.Inputs.front());
+      Check(V, "Value of fpe.except bundle operand must be a metadata", Call);
+      auto *MDS = dyn_cast<MDString>(V->getMetadata());
+      Check(MDS, "Value of fpe.except bundle operand must be a string", Call);
+      auto EB = convertStrToExceptionBehavior(MDS->getString(), true);
+      Check(EB.has_value(),
+            "Value of fpe.except bundle operand is not a correct exception "
+            "behavior",
+            Call);
       FoundFpeExceptBundle = true;
     }
   }
@@ -3844,23 +3853,25 @@ void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) {
 
   // FP metadata arguments must not conflict with the corresponding
   // operand bundles.
-  if (std::optional<RoundingMode> RM = CFPI->getRoundingMode()) {
+  if (std::optional<RoundingMode> RM = getRoundingModeArg(CB)) {
     RoundingMode Rounding = *RM;
-    auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_round);
+    auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_control);
     Check(RoundingBundle,
           "Constrained intrinsic has a rounding argument but the call does not",
           CB);
     if (RoundingBundle) {
-      OperandBundleUse OBU = *RoundingBundle;
-      uint64_t BundleRM = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
-      Check(BundleRM == static_cast<uint64_t>(Rounding),
-            "Rounding mode of the constrained intrinsic differs from that in "
-            "operand bundle",
-            CB);
+      std::optional<RoundingMode> RMByBundle = CB.getRoundingMode();
+      Check(RMByBundle, "Invalid value of rounding mode bundle", CB);
+      if (RMByBundle) {
+        Check(*RMByBundle == Rounding,
+              "Rounding mode of the constrained intrinsic differs from that in "
+              "operand bundle",
+              CB);
+      }
     }
   }
 
-  if (std::optional<fp::ExceptionBehavior> EB = CFPI->getExceptionBehavior()) {
+  if (std::optional<fp::ExceptionBehavior> EB = getExceptionBehaviorArg(CB)) {
     fp::ExceptionBehavior Excepts = *EB;
     auto ExceptionBundle = CB.getOperandBundle(LLVMContext::OB_fpe_except);
     Check(ExceptionBundle,
@@ -3868,12 +3879,16 @@ void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) {
           "call does not",
           CB);
     if (ExceptionBundle) {
-      OperandBundleUse OBU = *ExceptionBundle;
-      uint64_t BundleEB = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
-      Check(BundleEB == static_cast<uint64_t>(Excepts),
+      std::optional<fp::ExceptionBehavior> EBByBundle =
+          CB.getExceptionBehavior();
+      Check(EBByBundle, "Invalid value of exception behavior bundle", CB);
+      if (EBByBundle) {
+        Check(
+            *EBByBundle == Excepts,
             "Exception behavior of the constrained intrinsic differs from that "
             "in operand bundle",
             CB);
+      }
     }
   }
 }
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 478691d031150f..7538c84b03bfa4 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -255,7 +255,7 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
           CI->isNoTailCall() ||
           CI->hasOperandBundlesOtherThan(
               {LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth,
-               LLVMContext::OB_kcfi, LLVMContext::OB_fpe_round,
+               LLVMContext::OB_kcfi, LLVMContext::OB_fpe_control,
                LLVMContext::OB_fpe_except});
 
       if (!IsNoTail && CI->doesNotAccessMemory()) {
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 2ef62eae5a4b45..38fabb233c0782 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -418,7 +418,6 @@ struct PruningFunctionCloner {
 Instruction *
 PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
   const Instruction &OldInst = *II;
-  Instruction *NewInst = nullptr;
   if (HostFuncIsStrictFP) {
     Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
     if (CIID != Intrinsic::not_intrinsic) {
@@ -476,24 +475,21 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
       if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) {
         Args.push_back(
             MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
-        Bundles.emplace_back(
-            "fpe.round",
-            ConstantInt::get(
-                Type::getInt32Ty(Ctx),
-                static_cast<int>(RoundingMode::NearestTiesToEven)));
+        addFPRoundingBundle(Ctx, Bundles, RoundingMode::NearestTiesToEven);
       }
       Args.push_back(
           MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
-      Bundles.emplace_back("fpe.except",
-                           ConstantInt::get(Type::getInt32Ty(Ctx),
-                                            fp::ExceptionBehavior::ebIgnore));
-      NewInst =
+      addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
+      auto *NewConstrainedInst =
           CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
+
+      MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+      auto A = Attribute::getWithMemoryEffects(Ctx, ME);
+      NewConstrainedInst->addFnAttr(A);
+      return NewConstrainedInst;
     }
   }
-  if (!NewInst)
-    NewInst = II->clone();
-  return NewInst;
+  return OldInst.clone();
 }
 
 /// The specified block is found to be reachable, clone it and
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll b/llvm/test/Bitcode/auto-upgrade-constrained.ll
new file mode 100644
index 00000000000000..8e3f2c4ad77896
--- /dev/null
+++ b/llvm/test/Bitcode/auto-upgrade-constrained.ll
@@ -0,0 +1,327 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; RUN: llvm-dis %s.bc -o - | FileCheck %s
+
+define float @test_fadd(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fadd(
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+
+define float @test_fsub(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fsub(
+; CHECK: call float @llvm.experimental.constrained.fsub.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fmul(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.downward", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fmul(
+; CHECK: call float @llvm.experimental.constrained.fmul.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fdiv(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.upward", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fdiv(
+; CHECK: call float @llvm.experimental.constrained.fdiv.f32(float {{.*}}, float {{.*}}, metadata !"round.upward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtp"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_frem(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.frem.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_frem(
+; CHECK: call float @llvm.experimental.constrained.frem.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fma(float %a, float %b, float %c) strictfp {
+  %res = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.towardzero", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fma(
+; CHECK: call float @llvm.experimental.constrained.fma.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.towardzero", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fmuladd(float %a, float %b, float %c) strictfp {
+  %res = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fmuladd(
+; CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.tonearestaway", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rmm"), "fpe.except"(metadata !"ignore") ]
+
+define i32 @test_fptosi(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.ignore")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_fptosi(
+; CHECK: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ]
+
+define i32 @test_fptoui(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.fptoui.f32.i32(float %a, metadata !"fpexcept.strict")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_fptoui(
+; CHECK: call i32 @llvm.experimental.constrained.fptoui.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_sitofp(i32 %a) strictfp {
+  %res = call float @llvm.experimental.constrained.sitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_sitofp(
+; CHECK: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_uitofp(i32 %a) strictfp {
+  %res = call float @llvm.experimental.constrained.uitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_uitofp(
+; CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fptrunc(double %a) strictfp {
+  %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_fptrunc(
+; CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(double {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define double @test_fpext(float %a) strictfp {
+  %res = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.ignore")
+  ret double %res
+}
+; CHECK-LABEL: define double @test_fpext(
+; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ]
+
+define float @test_sqrt(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_sqrt(
+; CHECK: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_powi(float %a, i32 %b) strictfp {
+  %res = call float @llvm.experimental.constrained.powi.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_powi(
+; CHECK: call float @llvm.experimental.constrained.powi.f32(float {{.*}}, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_ldexp(float %a, i32 %b) strictfp {
+  %res = call float @llvm.experimental.constrained.ldexp.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_ldexp(
+; CHECK: call float @llvm.experimental.constrained.ldexp.f32.i32(float {{.*}}, i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_asin(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.asin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_asin(
+; CHECK: call float @llvm.experimental.constrained.asin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_acos(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.acos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_acos(
+; CHECK: call float @llvm.experimental.constrained.acos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_atan(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.atan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_atan(
+; CHECK: call float @llvm.experimental.constrained.atan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_sin(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.sin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_sin(
+; CHECK: call float @llvm.experimental.constrained.sin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_cos(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.cos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_cos(
+; CHECK: call float @llvm.experimental.constrained.cos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_tan(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.tan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_tan(
+; CHECK: call float @llvm.experimental.constrained.tan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_sinh(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.sinh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_sinh(
+; CHECK: call float @llvm.experimental.constrained.sinh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_cosh(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.cosh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_cosh(
+; CHECK: call float @llvm.experimental.constrained.cosh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_tanh(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.tanh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_tanh(
+; CHECK: call float @llvm.experimental.constrained.tanh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_pow(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.pow.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_pow(
+; CHECK: call float @llvm.experimental.constrained.pow.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.log.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_log(
+; CHECK: call float @llvm.experimental.constrained.log.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log10(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.log10.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_log10(
+; CHECK: call float @llvm.experimental.constrained.log10.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log2(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.log2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_log2(
+; CHECK: call float @llvm.experimental.constrained.log2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_exp(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.exp.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_exp(
+; CHECK: call float @llvm.experimental.constrained.exp.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_exp2(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.exp2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_exp2(
+; CHECK: call float @llvm.experimental.constrained.exp2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_rint(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.rint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_rint(
+; CHECK: call float @llvm.experimental.constrained.rint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_nearbyint(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.nearbyint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_nearbyint(
+; CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define i32 @test_lrint(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_lrint(
+; CHECK: call i32 @llvm.experimental.constrained.lrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define i32 @test_llrint(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.llrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_llrint(
+; CHECK: call i32 @llvm.experimental.constrained.llrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_maxnum(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_maxnum(
+; CHECK: call float @llvm.experimental.constrained.maxnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_minnum(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_minnum(
+; CHECK: call float @llvm.experimental.constrained.minnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_maximum(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.maximum.f32(float %a, float %b, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_maximum(
+; CHECK: call float @llvm.experimental.constrained.maximum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_minimum(float %a, float %b) strictfp {
+  %res = call float @llvm.experimental.constrained.minimum.f32(float %a, float %b, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_minimum(
+; CHECK: call float @llvm.experimental.constrained.minimum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_ceil(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_ceil(
+; CHECK: call float @llvm.experimental.constrained.ceil.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_floor(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.floor.f32(float %a, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_floor(
+; CHECK: call float @llvm.experimental.constrained.floor.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define i32 @test_lround(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.lround.i32.f32(float %a, metadata !"fpexcept.strict")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_lround(
+; CHECK: call i32 @llvm.experimental.constrained.lround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define i32 @test_llround(float %a) strictfp {
+  %res = call i32 @llvm.experimental.constrained.llround.i32.f32(float %a, metadata !"fpexcept.strict")
+  ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_llround(
+; CHECK: call i32 @llvm.experimental.constrained.llround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_round(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.round.f32(float %a, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_round(
+; CHECK: call float @llvm.experimental.constrained.round.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_roundeven(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.roundeven.f32(float %a, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_roundeven(
+; CHECK: call float @llvm.experimental.constrained.roundeven.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_trunc(float %a) strictfp {
+  %res = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict")
+  ret float %res
+}
+; CHECK-LABEL: define float @test_trunc(
+; CHECK: call float @llvm.experimental.constrained.trunc.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
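
Taken together, the CHECK lines above give the shape of the auto-upgrade this test exercises: the metadata arguments of a constrained call are preserved, the same FP information is duplicated into "fpe.control"/"fpe.except" operand bundles, and the call site moves to an attribute group that adds memory(inaccessiblemem: readwrite). A minimal sketch of one upgraded call, assembled from the patterns in this test (trunc takes no rounding argument, so it gets only the "fpe.except" bundle; rounded operations also get "fpe.control"):

  ; Before the upgrade, the FP environment lives only in the arguments.
  %before = call float @llvm.experimental.constrained.trunc.f32(
                float %a, metadata !"fpexcept.strict")

  ; After the upgrade: same arguments, plus an operand bundle and the
  ; inferred memory effects on the call-site attribute group.
  %after = call float @llvm.experimental.constrained.trunc.f32(
               float %a, metadata !"fpexcept.strict") #0
               [ "fpe.except"(metadata !"strict") ]

  attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }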
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc b/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc
new file mode 100644
index 0000000000000000000000000000000000000000..75a84901b5cbc37231b35fbed978c176d4c1afa0
GIT binary patch
literal 8120
zcmb7I4^R_l7T^3K8z62{#7Z<tH%L3wDvePgm at oqbYqa(nsrIO&m4zgd8j=tK1Z};I
z!3Hf_usD<U&;x64s_ooh&zmw++W|s7im8Xa)3I}o4Hd0*s4}hgytDV`?JkM3oUn;Q
z_T}v_ at B6*qd+*yX78`pnFHc91Bm_Y!B$}+}Ui$3!&)qxu{QSYJ5}q~*QA-fyO}LrL
zS0jAD$l(w5FK8<o6s{J at wD|{kO_Ilx#B!DJjS4b-CBOJUYVlE-HeDyqcs4~_+H7(U
zs27x^1zTiA2l$pj at riWY_)IhY1y5VrRED*-WtOyd?K;AjhtV^0<fSbwkIR%Ryn_g`
z2S*SEtd(gK%04RIwfd1#cx$WaH=9zDpICW1skrnBvGyHawo%+AQ#WZ=A)=Ldm)I}i
zrK>wKJIqb8AsJsT&MpzR98?Qm+$-Z(elAuEPY=2IPYIGgl#OV>C$8L$`%e@%$;#Ax
z!Xbp!5M*5jf+WLreslD{@IrgB+>(CElJ-7{>_mRy;k<g}c2RjM=&~BHlqR!xl^5f1
zou^)lee6E;ALsdzt&O2g&VAq9%-^i~)hA~xbAMZKl%K{!NI)?!ocH?r)tLw)i#d?2
z-qtm1fCLqhev!hA(&{}3QaJG$L0S>xLam97`4dmeigO$bvDfPAh9Wa5QsI&{M=N%}
zhah-7wuw}qU%W_!AiS8XkS{qlKrBHJam<6Hm7C6E68>aL$6qz4v1sorIl`i}_wfTs
ztn`<6En-pn*PasIWJ+^3W~HzweVe(CMd`h{%`8d_&lN(tlaxFrVpCd^o5rT}Nn19H
zQs$d%N*i|I5O|VGHw0`-=O5e4qI7)rYBr at lF`H7#swtF+18hpAvsbZII-Ct7I*C$=
z^+}dW=NG=g?xjsHv3qG>>H=2bZfwqA_tL#7lrrTjS(FZ*S;VH~d7VvZL+%unKCWS_
zbY~x1r5oNzCy!qk?@DOh4fjGt0wEc9!K56QYXc(jeO0K5YqW?k+5NttI(+9b$vpi2
zWKcub)-GRG_(0dgu#|qFts);7Rv3gk8*Re~505Vi^LzZWNR&nF{QkH5*u>0w0m>qF
z{Y8<CMQr0)iHb$+%gyr&S;W5mba^w2Sluq(d=m at yPMDmP#ftXtieb at g)}m!vO at PhK
zBM-4tSa?Ztf#mZG<H=LIN3+Ob=JD{j4+OdKTp+iVMep+uil)e$@^y4d6}qa+MYCA+
zHszHCQdrXcbAGp9C?w;{Ql-~oU-h!+9efI{DreCfIviNm&7ybbue^C>EcI6WP(F*@
zBwsX`MN$?$r$HlN(|b)dwKHp^XM0(Fa&QRU7Dae?JPoRyZU&}w-Fx@^Q`F-Z`Df*@
zihH7Ks(O}n*;6LsAK6ps9ZsJ at Z;n01;hv-(IUw>Malnk9$T$O}mx at Wnz=*<@&MRm(
z>YBnW#U$G3Lu*4dl52?c2N`yo#Fk2mRsDU+vt0_CCW(dx=0^ktgT{guK|#dV;&r1t
z#Au at _=ys#q-GX{w(2WP>kw>txR%}Egb!wzn#<6iTwp2EtysTDSR4e-ll})ope=49K
zHs+rc7)}KYO#*#nJiWdg-BE?+RRulpP6pkrL3cS?pN-bvO at gouWN?c(Sd$U-go2GB
z|Dv1Y*v%+*rxzR32r34<6r){=2>pJEPz<RR1AgTYA-d3N%pWx7hv at g7u&&*xYZmC+
z!%xGazwFZgO`z`%Oa5#Ry32wdcfebP`pSdejG(tOC_npx^d=(x(Tt6 at Vpj^KBRDn&
zS>V~qOMR;TBGp%F#U;P$QdBWIqP)RV*)rDYx{WJ30{VzKYa)^IV1p0!h1RXT-72LN
zP`#oWA{5uv%FBJqojesqSgjl<MC*44bdj*W+gJdC{L*h2HR_w;W4Pt>6v#B#0DQ#g
z_H5J>@^88#!^V2Ct7hyc&4RwKid3T`b1#l4F9QLST;CliI2oAV6xOvE^CMwHPhb9E
z7;-d3Oebr^D6pyl0n38&zn{j&%B4R<MdP^isz!Phm;P+VoHFc+27`}3MMd97u~9R2
zSF_;TF+$PrS6&>MJEm4%Q!5A5N~qv<^<y^tkp7ghphuA39>|ZBor3B^vfF{DDcIl+
z%Fld<-O at -$qS*I^((gNAl`v0G`#uV}YzaZDPzrm8p_KlnqWowWq$_|*MG6Nxl|x<1
zXp!o2pTd?^sli65`br_lRgF;{sLMcWO~Eal!B<K6?umAup(Si+59@}FE1;bspqRv@
zo>NSyfa=*kWvM^iwsxxqyM;?{MMbyF(%V!h>}phW4VR7#L$dD{2I{NOMsZMHI<F_J
zhdw-N)I~r;L4Iq%5D^$a%yt1a-;FqeJPKEj%7h!6CGupC)`Tzcx3smLE9sPFTMSFI
z#rs>;@9JbO<jbD`=c;pBax9HbqS{_*b6b6tR<}3DzSZgSSmPgSy&ij|mkN&@gLCO(
zxTdCfvZ2?}?Pcx|=u1hHKOEk5O>w$w*7YK46d^aJhaO#ar5yXt46P|0r5aTFof*5`
zP5RNsY<hBhL8v;_H)ZaQu<j&Je>70g9_DX at Ayix%QCz|mqXhMS-LL8=6vJJLL8jP5
zA5VH41OmNCEVY>tJ?SqxkoY1DQW#3EhYo}J9~9^*?&sh at 42MtzjTnvw^c^r at f`ZO~
z0fwzL%mY7xBM6)bk=eq&W*(o!GYnO@@U!7mMU#L>iF;v=@Y?c{Cd(!Y3 at -vl5I9i)
zzQ`bfs~G+=R}HqV6AcKZU^Ci!T}~_EvDPyBEr4UguJUnjad+ycy+u0N!{W~Qzj2#{
zSUj%9)!=L(JQi at kYa(u|Up7|!v|EP3;R|_K><2lcfMYldg;Wj<=Q?*R@<iMYE^#Mo
zcUX3Jg2}^pP<=X8J7E&_yCtR_#bdkG;{q4tm<-w>Z};KCQ&GAq2z|JMy8KMl<z;_)
ztTc3O<~|Ug`rS2Ao21!b$Kp at ahG-xf!5d$rjq4gzT&iFEJm?owxon(|jue8A4}TXP
zXchuze*4|WZt0##VOne-6?fCB;nY)9w at e?G!4`AJu6>ykmd>q&;FKH9Vc9dosh>QY
z at 1#C4 at 0Z=>GadV|KADW~m=PbV3B$!HyWO8dPsDUJVX&?Qu+<4*S5K?QWWL)Hh`Ta@
zxTXZKuOz^sWZJlsYG-&e!P=Eg8+)>DzfJ)AVgguu0$NlhfL)sahv&It*KbGwyK!1<
ztakKll)V+7|C#2L!M7!dZ%+_^U`BkbKm61ls&mN3xbDq5D|f7EJ*Rz~=3sh>J688>
z0 at yVPV2uyO*1$7rAw0LVm at Kmi`B0QWZX@|;MLM&S6y6h+xqYPQcc_otL5jm at DEcay
z at -^C1xRaFJLN9jyhQxM@@5=U&vP0qw|6Vffk7C`(8)W(y;<BQB<fGq+eO>#>j632G
zFGS93N$#n6n^YW0zIg5(Qq_}ucS#3{pH0p<aEQztP1gN*n9RDDT;_S7eB!MX-;c-0
z?Bgk+(htamy(v8(eMm04ka98Y55SXpZH7b33$U at L3EE$yxO?HCwZ%qQER0cGSHT$7
zbq>a`*y|a?W38UJMZ_WT at 43CMT07$gvyL&r<FdJFFL+`mcszB^$_cR5=cTRM8V_T*
zT at B2)9TuyP`7%*!chVrD(pAeCUV`z&?!<09;IE>QaDy at 64r9Oq^c{z5YwWs26SK6m
z5nC9pjKc#2=sVQ$r=l_T$r1NE?wi$w&soPvRc&|1O~PldzVD9Pl~((N&<>Z&!_c75
zT8!zqZ?i0Gy_J!GR at 33Aug(FwR*$_J{DdQ?(&dDoA%xv&wd7E`<=B>LXF!3N98e(S
z%qS`?4k#|Ps0z*;3UcQJ($0*crhx+r27?2NswxLqHNgc`$3asYOfI(s;K!>s$CkT{
z!^-q{=h&BWQIDQ~&RA+Ipa at Vzrf*LhY(N~S!VWewir!o3Wg(~Omt`iKTRwoL|9gOw
zP_X}SAnMw>M8gTjp)~X!#W9lv5`hhm+RkQX$?S|AaCRrhfb{-4bHMnnJu?)Z4xB*t
znNifkfdh)}m>D-`x*pV?KXXJ*+c<zY?5fNYoLZ7<o9PFIdV<ePj?>eFdRB7?727+e
z=fxa7VmW9vIq+|*-HYc|ROD$b*1Y9}Zi!W!SE0?z)mbX?a`Toh*Hl#6mQ`BwELuWa
aNz{2=i|gE5J%q)&_`#>D!$F^@5afSC6V8AD

literal 0
HcmV?d00001

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 3e4690ec3640f2..418a98873eaa11 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -821,8 +821,8 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]]
 ; CHECK-NEXT:    [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]]
-; CHECK-NEXT:    [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
-; CHECK-NEXT:    [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT:    [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+; CHECK-NEXT:    [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]]
 ; CHECK-NEXT:    [[__YEVEN:%.*]] = shl i32 [[Y]], 31
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[X]] to i32
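
This hunk also records the change in how the bundle payloads are encoded: an earlier revision of the patch used integer codes ("fpe.round"(i32 7) for dynamic rounding, "fpe.except"(i32 2) for strict), while the current one reuses the metadata strings already carried by the intrinsic arguments. Side by side, reduced from the CHECK lines above (fast-math flags and attributes omitted):

  ; Earlier draft: integer payloads.
  %old = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %y,
             metadata !"round.dynamic", metadata !"fpexcept.strict")
             [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]

  ; Current form: metadata payloads mirroring the arguments.
  %new = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %y,
             metadata !"round.dynamic", metadata !"fpexcept.strict")
             [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]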
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index b97454a29d5135..4cedbaf2a36a33 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -1937,7 +1937,7 @@ define float @constrained_sitofp(i32 %arg) strictfp {
 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
 ; CHECK-LABEL: define nofpclass(nan nzero sub) float @constrained_sitofp
 ; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8:[0-9]+]] {
-; CHECK-NEXT:    [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT:    [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23:[0-9]+]]
 ; CHECK-NEXT:    ret float [[VAL]]
 ;
   %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -1948,7 +1948,7 @@ define float @constrained_uitofp(i32 %arg) strictfp {
 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
 ; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @constrained_uitofp
 ; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8]] {
-; CHECK-NEXT:    [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT:    [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23]]
 ; CHECK-NEXT:    ret float [[VAL]]
 ;
   %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
diff --git a/llvm/test/Transforms/Inline/inline-strictfp.ll b/llvm/test/Transforms/Inline/inline-strictfp.ll
index bc42fafd63943d..5883002061c304 100644
--- a/llvm/test/Transforms/Inline/inline-strictfp.ll
+++ b/llvm/test/Transforms/Inline/inline-strictfp.ll
@@ -15,8 +15,8 @@ entry:
   %add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret float %add
 ; CHECK-LABEL: @host_02
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 }
 
 
@@ -34,8 +34,8 @@ entry:
   %add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret float %add
 ; CHECK-LABEL: @host_04
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 }
 
 
@@ -77,8 +77,8 @@ entry:
   ret float %add
 ; CHECK-LABEL: @host_08
 ; CHECK: call float @func_ext(float {{.*}}) #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 }
 
 
@@ -97,8 +97,8 @@ entry:
   %add = call double @llvm.experimental.constrained.fadd.f64(double %0, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret double %add
 ; CHECK-LABEL: @host_10
-; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #0
-; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 }
 
 ; fcmp does not depend on the rounding mode and has a metadata argument.
@@ -114,8 +114,8 @@ entry:
   %cmp = call i1 @inlined_11(float %a, float %b) #0
   ret i1 %cmp
 ; CHECK-LABEL: @host_12
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
-; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 }
 
 ; Intrinsic 'ceil' has a constrained variant.
@@ -131,11 +131,12 @@ entry:
   %add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
   ret float %add
 ; CHECK-LABEL: @host_14
-; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare float  @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Verifier/fp-intrinsics.ll b/llvm/test/Verifier/fp-intrinsics.ll
index 4934843d5a2ed6..fd7b07abab93ff 100644
--- a/llvm/test/Verifier/fp-intrinsics.ll
+++ b/llvm/test/Verifier/fp-intrinsics.ll
@@ -5,7 +5,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
 
 ; Test an illegal value for the rounding mode argument.
 ; CHECK: invalid rounding mode argument
-; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
 define double @f2(double %a, double %b) #0 {
 entry:
   %fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -17,7 +17,7 @@ entry:
 
 ; Test an illegal value for the exception behavior argument.
 ; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
 define double @f3(double %a, double %b) #0 {
 entry:
   %fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -29,7 +29,7 @@ entry:
 
 ; Test an illegal value for the rounding mode argument.
 ; CHECK-NEXT: invalid rounding mode argument
-; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
 define double @f4(double %a) #0 {
 entry:
   %fadd = call double @llvm.experimental.constrained.sqrt.f64(
@@ -41,7 +41,7 @@ entry:
 
 ; Test an illegal value for the exception behavior argument.
 ; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT:   %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
 define double @f5(double %a) #0 {
 entry:
   %fadd = call double @llvm.experimental.constrained.sqrt.f64(

>From da73eadf9bda8f8d34d28feea9d910b769e63f67 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Sun, 27 Oct 2024 17:05:47 +0700
Subject: [PATCH 3/6] Update tests in Transforms/EarlyCSE

---
 .../Transforms/EarlyCSE/defaultfp-strictfp.ll | 67 ++++++++++---------
 .../Transforms/EarlyCSE/ebstrict-strictfp.ll  | 23 ++++---
 .../Transforms/EarlyCSE/mixed-strictfp.ll     | 59 ++++++++--------
 .../Transforms/EarlyCSE/nonmixed-strictfp.ll  | 59 ++++++++--------
 .../Transforms/EarlyCSE/round-dyn-strictfp.ll | 43 ++++++------
 .../test/Transforms/EarlyCSE/tfpropagation.ll | 17 ++---
 6 files changed, 137 insertions(+), 131 deletions(-)
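
All of the updates below follow one pattern: with this change the constrained intrinsics infer memory(inaccessiblemem: readwrite), so their call sites end up in a different attribute group than ordinary strictfp calls such as @foo.f64 or @arbitraryfunc. The regenerated tests therefore match the intrinsic calls with a captured #[[ATTR0]] and leave the helper calls at the literal #0. The two groups, as the new CHECK lines distinguish them (the group number 1 below is illustrative):

  attributes #0 = { strictfp }
  ; Inferred for the constrained intrinsics; matched as #[[ATTR0]].
  attributes #1 = { strictfp memory(inaccessiblemem: readwrite) }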

diff --git a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
index 3871822c9dc17a..90904ead23e04a 100644
--- a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
@@ -8,7 +8,7 @@
 define double @multiple_fadd(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -20,8 +20,8 @@ define double @multiple_fadd(double %a, double %b) #0 {
 define double @multiple_fadd_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -34,7 +34,7 @@ define double @multiple_fadd_split(double %a, double %b) #0 {
 define double @multiple_fsub(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -46,8 +46,8 @@ define double @multiple_fsub(double %a, double %b) #0 {
 define double @multiple_fsub_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -60,7 +60,7 @@ define double @multiple_fsub_split(double %a, double %b) #0 {
 define double @multiple_fmul(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -72,8 +72,8 @@ define double @multiple_fmul(double %a, double %b) #0 {
 define double @multiple_fmul_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -86,7 +86,7 @@ define double @multiple_fmul_split(double %a, double %b) #0 {
 define double @multiple_fdiv(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -98,8 +98,8 @@ define double @multiple_fdiv(double %a, double %b) #0 {
 define double @multiple_fdiv_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -112,7 +112,7 @@ define double @multiple_fdiv_split(double %a, double %b) #0 {
 define double @multiple_frem(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -124,8 +124,8 @@ define double @multiple_frem(double %a, double %b) #0 {
 define double @multiple_frem_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -138,7 +138,7 @@ define double @multiple_frem_split(double %a, double %b) #0 {
 define i32 @multiple_fptoui(double %a) #0 {
 ; CHECK-LABEL: @multiple_fptoui(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -150,8 +150,8 @@ define i32 @multiple_fptoui(double %a) #0 {
 define i32 @multiple_fptoui_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fptoui_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -164,7 +164,7 @@ define i32 @multiple_fptoui_split(double %a, double %b) #0 {
 define double @multiple_uitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -176,8 +176,8 @@ define double @multiple_uitofp(i32 %a) #0 {
 define double @multiple_uitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -190,7 +190,7 @@ define double @multiple_uitofp_split(i32 %a) #0 {
 define i32 @multiple_fptosi(double %a) #0 {
 ; CHECK-LABEL: @multiple_fptosi(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -202,8 +202,8 @@ define i32 @multiple_fptosi(double %a) #0 {
 define i32 @multiple_fptosi_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fptosi_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -216,7 +216,7 @@ define i32 @multiple_fptosi_split(double %a, double %b) #0 {
 define double @multiple_sitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -228,8 +228,8 @@ define double @multiple_sitofp(i32 %a) #0 {
 define double @multiple_sitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -257,9 +257,9 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
 define i1 @multiple_fcmp_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmp_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -275,7 +275,7 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmps(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -289,9 +289,9 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
 define i1 @multiple_fcmps_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fcmps_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -304,6 +304,7 @@ define i1 @multiple_fcmps_split(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
index f2675ce7816a4e..6183b7c1558a51 100644
--- a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
@@ -10,7 +10,7 @@ define double @fadd_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fadd_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -23,7 +23,7 @@ define double @fsub_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fsub_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -36,7 +36,7 @@ define double @fmul_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fmul_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -49,7 +49,7 @@ define double @fdiv_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @fdiv_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -62,7 +62,7 @@ define double @frem_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @frem_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -75,7 +75,7 @@ define i32 @fptoui_strict(double %a) #0 {
 ; CHECK-LABEL: @fptoui_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -88,7 +88,7 @@ define double @uitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @uitofp_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -101,7 +101,7 @@ define i32 @fptosi_strict(double %a) #0 {
 ; CHECK-LABEL: @fptosi_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -114,7 +114,7 @@ define double @sitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @sitofp_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -129,7 +129,7 @@ define i1 @fcmp_strict(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -146,7 +146,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -158,6 +158,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
index b79f7018b8d0d5..61e976ce428173 100644
--- a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
@@ -10,7 +10,7 @@ define double @mixed_fadd_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -23,7 +23,7 @@ define double @mixed_fadd_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -36,7 +36,7 @@ define double @mixed_fadd_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fadd_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -49,7 +49,7 @@ define double @mixed_fsub_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -62,7 +62,7 @@ define double @mixed_fsub_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -75,7 +75,7 @@ define double @mixed_fsub_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fsub_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -88,7 +88,7 @@ define double @mixed_fmul_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -100,7 +100,7 @@ define double @mixed_fmul_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -113,7 +113,7 @@ define double @mixed_fmul_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fmul_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -126,7 +126,7 @@ define double @mixed_fdiv_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -139,7 +139,7 @@ define double @mixed_fdiv_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -152,7 +152,7 @@ define double @mixed_fdiv_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_fdiv_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -165,7 +165,7 @@ define double @mixed_frem_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -178,7 +178,7 @@ define double @mixed_frem_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -191,7 +191,7 @@ define double @mixed_frem_strict(double %a, double %b) #0 {
 ; CHECK-LABEL: @mixed_frem_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -204,7 +204,7 @@ define i32 @mixed_fptoui_maytrap(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptoui_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -217,7 +217,7 @@ define i32 @mixed_fptoui_strict(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptoui_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -230,7 +230,7 @@ define double @mixed_uitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define double @mixed_uitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -256,7 +256,7 @@ define double @mixed_uitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_uitofp_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -269,7 +269,7 @@ define i32 @mixed_fptosi_maytrap(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptosi_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -282,7 +282,7 @@ define i32 @mixed_fptosi_strict(double %a) #0 {
 ; CHECK-LABEL: @mixed_fptosi_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -295,7 +295,7 @@ define double @mixed_sitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -308,7 +308,7 @@ define double @mixed_sitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -321,7 +321,7 @@ define double @mixed_sitofp_strict(i32 %a) #0 {
 ; CHECK-LABEL: @mixed_sitofp_strict(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -336,7 +336,7 @@ define i1 @mixed_fcmp_maytrap(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -353,7 +353,7 @@ define i1 @mixed_fcmp_strict(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -370,7 +370,7 @@ define i1 @mixed_fcmps_maytrap(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -387,7 +387,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -399,6 +399,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
index 3acf5597dfc3fe..1ce2fdd3f75de0 100644
--- a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
@@ -10,7 +10,7 @@
 define double @fadd_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fadd_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -22,7 +22,7 @@ define double @fadd_defaultenv(double %a, double %b) #0 {
 define double @fadd_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @fadd_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -34,7 +34,7 @@ define double @fadd_neginf(double %a, double %b) #0 {
 define double @fadd_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fadd_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -46,7 +46,7 @@ define double @fadd_maytrap(double %a, double %b) #0 {
 define double @fsub_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fsub_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -58,7 +58,7 @@ define double @fsub_defaultenv(double %a, double %b) #0 {
 define double @fsub_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @fsub_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -70,7 +70,7 @@ define double @fsub_neginf(double %a, double %b) #0 {
 define double @fsub_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fsub_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -82,7 +82,7 @@ define double @fsub_maytrap(double %a, double %b) #0 {
 define double @fmul_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fmul_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -94,7 +94,7 @@ define double @fmul_defaultenv(double %a, double %b) #0 {
 define double @fmul_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @fmul_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -105,7 +105,7 @@ define double @fmul_neginf(double %a, double %b) #0 {
 define double @fmul_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fmul_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -117,7 +117,7 @@ define double @fmul_maytrap(double %a, double %b) #0 {
 define double @fdiv_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fdiv_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -129,7 +129,7 @@ define double @fdiv_defaultenv(double %a, double %b) #0 {
 define double @fdiv_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @fdiv_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -141,7 +141,7 @@ define double @fdiv_neginf(double %a, double %b) #0 {
 define double @fdiv_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fdiv_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -153,7 +153,7 @@ define double @fdiv_maytrap(double %a, double %b) #0 {
 define double @frem_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @frem_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -165,7 +165,7 @@ define double @frem_defaultenv(double %a, double %b) #0 {
 define double @frem_neginf(double %a, double %b) #0 {
 ; CHECK-LABEL: @frem_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -177,7 +177,7 @@ define double @frem_neginf(double %a, double %b) #0 {
 define double @frem_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @frem_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -189,7 +189,7 @@ define double @frem_maytrap(double %a, double %b) #0 {
 define i32 @fptoui_defaultenv(double %a) #0 {
 ; CHECK-LABEL: @fptoui_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -201,7 +201,7 @@ define i32 @fptoui_defaultenv(double %a) #0 {
 define i32 @fptoui_maytrap(double %a) #0 {
 ; CHECK-LABEL: @fptoui_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.maytrap") #0
@@ -213,7 +213,7 @@ define i32 @fptoui_maytrap(double %a) #0 {
 define double @uitofp_defaultenv(i32 %a) #0 {
 ; CHECK-LABEL: @uitofp_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -225,7 +225,7 @@ define double @uitofp_defaultenv(i32 %a) #0 {
 define double @uitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @uitofp_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -237,7 +237,7 @@ define double @uitofp_neginf(i32 %a) #0 {
 define double @uitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @uitofp_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -249,7 +249,7 @@ define double @uitofp_maytrap(i32 %a) #0 {
 define i32 @fptosi_defaultenv(double %a) #0 {
 ; CHECK-LABEL: @fptosi_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -261,7 +261,7 @@ define i32 @fptosi_defaultenv(double %a) #0 {
 define i32 @fptosi_maytrap(double %a) #0 {
 ; CHECK-LABEL: @fptosi_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.maytrap") #0
@@ -273,7 +273,7 @@ define i32 @fptosi_maytrap(double %a) #0 {
 define double @sitofp_defaultenv(i32 %a) #0 {
 ; CHECK-LABEL: @sitofp_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -285,7 +285,7 @@ define double @sitofp_defaultenv(i32 %a) #0 {
 define double @sitofp_neginf(i32 %a) #0 {
 ; CHECK-LABEL: @sitofp_neginf(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -297,7 +297,7 @@ define double @sitofp_neginf(i32 %a) #0 {
 define double @sitofp_maytrap(i32 %a) #0 {
 ; CHECK-LABEL: @sitofp_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP1]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -310,7 +310,7 @@ define i1 @fcmp_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fcmp_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -325,7 +325,7 @@ define i1 @fcmp_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fcmp_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
@@ -340,7 +340,7 @@ define i1 @fcmps_defaultenv(double %a, double %b) #0 {
 ; CHECK-LABEL: @fcmps_defaultenv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -355,7 +355,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 {
 ; CHECK-LABEL: @fcmps_maytrap(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
@@ -367,6 +367,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
index c33e022f53be29..b2cebfeb586e3d 100644
--- a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
@@ -11,7 +11,7 @@ define double @multiple_fadd(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -23,9 +23,9 @@ define double @multiple_fadd(double %a, double %b) #0 {
 define double @multiple_fadd_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fadd_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -39,7 +39,7 @@ define double @multiple_fsub(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -51,9 +51,9 @@ define double @multiple_fsub(double %a, double %b) #0 {
 define double @multiple_fsub_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fsub_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -67,7 +67,7 @@ define double @multiple_fmul(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -79,9 +79,9 @@ define double @multiple_fmul(double %a, double %b) #0 {
 define double @multiple_fmul_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fmul_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -95,7 +95,7 @@ define double @multiple_fdiv(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -107,9 +107,9 @@ define double @multiple_fdiv(double %a, double %b) #0 {
 define double @multiple_fdiv_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_fdiv_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -123,7 +123,7 @@ define double @multiple_frem(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -135,9 +135,9 @@ define double @multiple_frem(double %a, double %b) #0 {
 define double @multiple_frem_split(double %a, double %b) #0 {
 ; CHECK-LABEL: @multiple_frem_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -151,7 +151,7 @@ define double @multiple_uitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -163,9 +163,9 @@ define double @multiple_uitofp(i32 %a) #0 {
 define double @multiple_uitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_uitofp_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -179,7 +179,7 @@ define double @multiple_sitofp(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -191,9 +191,9 @@ define double @multiple_sitofp(i32 %a) #0 {
 define double @multiple_sitofp_split(i32 %a) #0 {
 ; CHECK-LABEL: @multiple_sitofp_split(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT:    call void @arbitraryfunc() #0
 ; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
 ; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -204,6 +204,7 @@ define double @multiple_sitofp_split(i32 %a) #0 {
 }
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare void @arbitraryfunc() #0
 declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
index d07c9627f9b52f..53127bf5f3aec7 100644
--- a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
+++ b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
@@ -68,10 +68,10 @@ define double @branching_exceptignore(i64 %a) #0 {
 ; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
 ; CHECK:       if.then3:
-; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
 ; CHECK-NEXT:    br label [[OUT:%.*]]
 ; CHECK:       if.end3:
-; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
 ; CHECK-NEXT:    br label [[OUT]]
 ; CHECK:       out:
 ; CHECK-NEXT:    ret double [[CONV1]]
@@ -98,10 +98,10 @@ define double @branching_exceptignore_dynround(i64 %a) #0 {
 ; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]]
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
 ; CHECK:       if.then3:
-; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
 ; CHECK-NEXT:    br label [[OUT:%.*]]
 ; CHECK:       if.end3:
-; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
 ; CHECK-NEXT:    br label [[OUT]]
 ; CHECK:       out:
 ; CHECK-NEXT:    ret double [[CONV1]]
@@ -128,10 +128,10 @@ define double @branching_maytrap(i64 %a) #0 {
 ; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.maytrap") #[[ATTR0]]
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
 ; CHECK:       if.then3:
-; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
 ; CHECK-NEXT:    br label [[OUT:%.*]]
 ; CHECK:       if.end3:
-; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
 ; CHECK-NEXT:    br label [[OUT]]
 ; CHECK:       out:
 ; CHECK-NEXT:    ret double [[CONV1]]
@@ -160,10 +160,10 @@ define double @branching_ebstrict(i64 %a) #0 {
 ; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR0]]
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
 ; CHECK:       if.then3:
-; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #0
 ; CHECK-NEXT:    br label [[OUT:%.*]]
 ; CHECK:       if.end3:
-; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]]
+; CHECK-NEXT:    [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #0
 ; CHECK-NEXT:    br label [[OUT]]
 ; CHECK:       out:
 ; CHECK-NEXT:    ret double [[CONV1]]
@@ -190,5 +190,6 @@ declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, meta
 declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) #0
 
 attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
 
 declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) strictfp

>From 868660b970ac1a6af74e418e75097e05759350e2 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Mon, 28 Oct 2024 22:12:59 +0700
Subject: [PATCH 4/6] Small changes: docs, clang-format, reviewers' notes

- Fix Doxygen error,
- Fix clang-format error,
- Remove unused function declaration,
- Remove explicit setting of MD_fpmath; it is already copied by copyMetadata.
---
 llvm/docs/LangRef.rst                       | 4 ++--
 llvm/include/llvm/IR/AutoUpgrade.h          | 2 --
 llvm/lib/IR/AutoUpgrade.cpp                 | 4 +---
 llvm/lib/Transforms/Utils/CloneFunction.cpp | 2 +-
 4 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index b091e383be31c6..717539b5b2383c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3011,7 +3011,7 @@ Floating-point Environment Operand Bundles
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 These operand bundles provide details on how the operation interacts with the
-:ref:`floating-point environment <_floatenv>`. There are two kinds of such
+:ref:`floating-point environment <floatenv>`. There are two kinds of such
 operand bundles, which characterize interaction with floating-point control
 modes and status bits respectively.
 
@@ -3043,7 +3043,7 @@ string value, which may have one of the values:
     "maytrap"
 
 It has the same meaning as the corresponding argument in
-:ref:`constrained intrinsics <_constrainedfp>`.
+:ref:`constrained intrinsics <constrainedfp>`.
 
 .. _moduleasm:
 
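(For a concrete instance of the "fpe.except" bundle described in this
hunk, see the tests updated later in this patch, for example:

    %val = call float @llvm.trunc.f32(float %x) [ "fpe.except"(metadata !"strict") ]

The "fpe.control" bundle carries the rounding-mode string analogously.)
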
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 8bd005d73fba36..97c3e4d7589d7b 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -107,8 +107,6 @@ namespace llvm {
   /// Upgrade operand bundles (without knowing about their user instruction).
   void UpgradeOperandBundles(std::vector<OperandBundleDef> &OperandBundles);
 
-  CallBase *upgradeConstrainedFunctionCall(CallBase *CB);
-
 } // End llvm namespace
 
 #endif
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 0d04b6634f4351..3e30fa3d10ac91 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4356,9 +4356,7 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
     Bundles.append(NewBundles);
 
     Builder.SetInsertPoint(CB->getParent(), CB->getIterator());
-    MDNode *FPMath = CB->getMetadata(LLVMContext::MD_fpmath);
-    NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName(), FPMath);
-
+    NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName());
     NewCB->copyMetadata(*CB);
     AttributeList Attrs = CB->getAttributes();
     NewCB->setAttributes(Attrs);
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 38fabb233c0782..eae1a92632a8ab 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -479,7 +479,7 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
       }
       Args.push_back(
           MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
-        addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
+      addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
       auto *NewConstrainedInst =
           CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
 

>From 3da7fd198007d6c3698c025bfb96ea5fb0ccca34 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 25 Sep 2024 16:07:02 +0700
Subject: [PATCH 5/6] Reimplement constrained 'trunc' using operand bundles

Previously the 'trunc' function in a non-default floating-point
environment was implemented with a special LLVM intrinsic
'experimental.constrained.trunc'. The introduction of floating-point
operand bundles allows expressing the interaction with the FP
environment using the same intrinsic as in the default mode.

This change removes 'llvm.experimental.constrained.trunc' and uses
'llvm.trunc' in all cases.
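
For illustration (the form is taken from the updated tests below), a
truncation that was previously written as

    %trunc = call double @llvm.experimental.constrained.trunc.f64(
                                                 double %a,
                                                 metadata !"fpexcept.strict")

is now expressed with the regular intrinsic plus an operand bundle:

    %trunc = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]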
---
 clang/lib/CodeGen/CGBuiltin.cpp               | 52 +++++++-------
 .../AArch64/neon-intrinsics-constrained.c     |  2 +-
 .../v8.2a-fp16-intrinsics-constrained.c       |  4 +-
 .../PowerPC/builtins-ppc-fpconstrained.c      |  6 +-
 .../builtins-systemz-vector-constrained.c     |  4 +-
 .../builtins-systemz-vector2-constrained.c    |  3 +-
 .../builtins-systemz-zvector-constrained.c    |  6 +-
 .../builtins-systemz-zvector2-constrained.c   | 10 +--
 clang/test/CodeGen/arm64-vrnd-constrained.c   |  4 +-
 .../test/CodeGen/constrained-math-builtins.c  | 19 ++---
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h |  1 +
 llvm/include/llvm/CodeGen/TargetLowering.h    |  1 +
 llvm/include/llvm/IR/ConstrainedOps.def       |  8 ++-
 llvm/include/llvm/IR/Function.h               |  2 +-
 llvm/include/llvm/IR/InstrTypes.h             |  3 +
 llvm/include/llvm/IR/IntrinsicInst.h          | 12 ++++
 llvm/include/llvm/IR/Intrinsics.h             |  7 +-
 llvm/include/llvm/IR/Intrinsics.td            |  3 -
 llvm/lib/Analysis/ConstantFolding.cpp         | 13 ++--
 llvm/lib/AsmParser/LLParser.cpp               |  9 +++
 llvm/lib/CodeGen/ExpandVectorPredication.cpp  |  2 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  6 ++
 .../SelectionDAG/LegalizeVectorOps.cpp        |  2 +
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  3 +
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  1 +
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 19 ++++-
 .../SelectionDAG/SelectionDAGBuilder.h        |  2 +-
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  3 +-
 llvm/lib/IR/AutoUpgrade.cpp                   | 72 +++++++++++++++++--
 llvm/lib/IR/Function.cpp                      |  4 +-
 llvm/lib/IR/Instructions.cpp                  |  5 ++
 llvm/lib/IR/IntrinsicInst.cpp                 | 32 ++++++++-
 llvm/lib/IR/Intrinsics.cpp                    |  2 +-
 llvm/lib/Transforms/Utils/Local.cpp           |  7 +-
 llvm/test/Assembler/fp-intrinsics-attr.ll     | 12 ++--
 llvm/test/Bitcode/auto-upgrade-constrained.ll |  2 +-
 .../CodeGen/AArch64/fp-intrinsics-fp16.ll     |  3 +-
 .../CodeGen/AArch64/fp-intrinsics-vector.ll   |  9 +--
 llvm/test/CodeGen/AArch64/fp-intrinsics.ll    |  9 +--
 llvm/test/CodeGen/ARM/fp-intrinsics.ll        |  4 +-
 llvm/test/CodeGen/PowerPC/fp-strict-round.ll  | 21 ++----
 .../ppcf128-constrained-fp-intrinsics.ll      |  5 +-
 .../vector-constrained-fp-intrinsics.ll       | 21 ++----
 .../CodeGen/RISCV/double-intrinsics-strict.ll |  4 +-
 .../CodeGen/RISCV/float-intrinsics-strict.ll  |  4 +-
 ...fixed-vectors-ftrunc-constrained-sdnode.ll | 45 ++++--------
 .../RISCV/rvv/ftrunc-constrained-sdnode.ll    | 45 ++++--------
 .../RISCV/zfh-half-intrinsics-strict.ll       |  4 +-
 .../RISCV/zfhmin-half-intrinsics-strict.ll    |  4 +-
 .../CodeGen/SystemZ/fp-strict-round-01.ll     | 15 +---
 .../CodeGen/SystemZ/fp-strict-round-02.ll     | 15 +---
 .../CodeGen/SystemZ/fp-strict-round-03.ll     | 15 +---
 .../CodeGen/SystemZ/vec-strict-round-01.ll    | 10 +--
 .../CodeGen/SystemZ/vec-strict-round-02.ll    | 10 +--
 .../vector-constrained-fp-intrinsics.ll       | 21 ++----
 .../X86/fp-strict-scalar-round-fp16.ll        |  6 +-
 .../CodeGen/X86/fp-strict-scalar-round.ll     |  8 +--
 .../test/CodeGen/X86/fp128-libcalls-strict.ll |  3 +-
 llvm/test/CodeGen/X86/fp80-strict-libcalls.ll |  3 +-
 llvm/test/CodeGen/X86/vec-strict-256-fp16.ll  |  4 +-
 llvm/test/CodeGen/X86/vec-strict-256.ll       |  8 +--
 llvm/test/CodeGen/X86/vec-strict-512-fp16.ll  |  3 +-
 llvm/test/CodeGen/X86/vec-strict-512.ll       |  6 +-
 llvm/test/CodeGen/X86/vec-strict-round-128.ll |  8 +--
 .../X86/vector-constrained-fp-intrinsics.ll   | 21 ++----
 .../InstSimplify/constfold-constrained.ll     | 49 +++++++------
 66 files changed, 363 insertions(+), 363 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index cb9c23b8e0a0d0..52b2d3320c60ea 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -657,6 +657,17 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   }
 }
 
+// Emit a simple mangled intrinsic that has 1 argument and a return type
+// matching the argument type.
+static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                 unsigned IntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+  return CGF.Builder.CreateCall(F, Src0);
+}
+
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -3238,9 +3249,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     case Builtin::BI__builtin_truncf16:
     case Builtin::BI__builtin_truncl:
     case Builtin::BI__builtin_truncf128:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::trunc,
-                                   Intrinsic::experimental_constrained_trunc));
+      return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc));
 
     case Builtin::BIlround:
     case Builtin::BIlroundf:
@@ -6827,7 +6836,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
   unsigned j = 0;
   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
        ai != ae; ++ai, ++j) {
-    if (F->isConstrainedFPIntrinsic())
+    if (F->isLegacyConstrainedIntrinsic())
       if (ai->getType()->isMetadataTy())
         continue;
     if (shift > 0 && shift == j)
@@ -6836,7 +6845,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
       Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
   }
 
-  if (F->isConstrainedFPIntrinsic())
+  if (F->isLegacyConstrainedIntrinsic())
     return Builder.CreateConstrainedFPCall(F, Ops, name);
   else
     return Builder.CreateCall(F, Ops, name);
@@ -12989,13 +12998,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
               : Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
   }
-  case NEON::BI__builtin_neon_vrndh_f16: {
+  case NEON::BI__builtin_neon_vrndh_f16:
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
-  }
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, HalfTy), Ops,
+                        "vrndz");
+
   case NEON::BI__builtin_neon_vrnd32x_f32:
   case NEON::BI__builtin_neon_vrnd32xq_f32:
   case NEON::BI__builtin_neon_vrnd32x_f64:
@@ -13029,12 +13036,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
   }
   case NEON::BI__builtin_neon_vrnd_v:
-  case NEON::BI__builtin_neon_vrndq_v: {
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
-  }
+  case NEON::BI__builtin_neon_vrndq_v:
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, Ty), Ops, "vrndz");
+
   case NEON::BI__builtin_neon_vcvt_f64_v:
   case NEON::BI__builtin_neon_vcvtq_f64_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -18251,9 +18255,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                : Intrinsic::ceil;
     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
              BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
-      ID = Builder.getIsFPConstrained()
-               ? Intrinsic::experimental_constrained_trunc
-               : Intrinsic::trunc;
+      return emitUnaryFPBuiltin(*this, E, Intrinsic::trunc);
+
     llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
     return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
                                         : Builder.CreateCall(F, X);
@@ -18754,9 +18757,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
         .getScalarVal();
   case PPC::BI__builtin_ppc_friz:
   case PPC::BI__builtin_ppc_frizs:
-    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
-                           *this, E, Intrinsic::trunc,
-                           Intrinsic::experimental_constrained_trunc))
+    return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc))
         .getScalarVal();
   case PPC::BI__builtin_ppc_fsqrt:
   case PPC::BI__builtin_ppc_fsqrts:
@@ -20536,8 +20537,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
               CI = Intrinsic::experimental_constrained_nearbyint; break;
       case 1: ID = Intrinsic::round;
               CI = Intrinsic::experimental_constrained_round; break;
-      case 5: ID = Intrinsic::trunc;
-              CI = Intrinsic::experimental_constrained_trunc; break;
+      case 5: ID = Intrinsic::trunc; break;
       case 6: ID = Intrinsic::ceil;
               CI = Intrinsic::experimental_constrained_ceil; break;
       case 7: ID = Intrinsic::floor;
@@ -20546,7 +20546,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
       break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      if (Builder.getIsFPConstrained()) {
+      if (Builder.getIsFPConstrained() && ID != Intrinsic::trunc) {
         Function *F = CGM.getIntrinsic(CI, ResultType);
         return Builder.CreateConstrainedFPCall(F, X);
       } else {
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
index 15ae7eea820e80..0405cf7f19c73b 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
@@ -792,7 +792,7 @@ float64x1_t test_vrndx_f64(float64x1_t a) {
 // COMMON-LABEL: test_vrnd_f64
 // COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
 // UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
-// CONSTRAINED:   [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CONSTRAINED:   [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR:      ret <1 x double> [[VRNDZ1_I]]
 float64x1_t test_vrnd_f64(float64x1_t a) {
   return vrnd_f64(a);
diff --git a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
index 9109626cea9ca2..9079a6690b9db3 100644
--- a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
@@ -150,7 +150,7 @@ uint64_t test_vcvth_u64_f16 (float16_t a) {
 
 // COMMON-LABEL: test_vrndh_f16
 // UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
-// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.trunc.f16(half %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR:       ret half [[RND]]
 float16_t test_vrndh_f16(float16_t a) {
   return vrndh_f16(a);
@@ -298,3 +298,5 @@ float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
   return vfmsh_f16(a, b, c);
 }
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
index 838db02415fe5b..b326f131a56e54 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
@@ -85,13 +85,13 @@ void test_float(void) {
   vf = __builtin_vsx_xvrspiz(vf);
   // CHECK-LABEL: try-xvrspiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrspiz
 
   vd = __builtin_vsx_xvrdpiz(vd);
   // CHECK-LABEL: try-xvrdpiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrdpiz
 
   vf = __builtin_vsx_xvmaddasp(vf, vf, vf);
@@ -156,3 +156,5 @@ void test_float(void) {
   // CHECK-CONSTRAINED: fneg <2 x double> [[RESULT1]]
   // CHECK-ASM: xvnmsubadp
 }
+
+// CHECK-CONSTRAINED: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
index 6d2845504a39f0..77ede2c10eea08 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
@@ -45,7 +45,7 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 1);
   // CHECK: call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 5);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vd = __builtin_s390_vfidb(vd, 4, 6);
   // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 7);
@@ -53,3 +53,5 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 4);
   // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
index 735b6a0249ab62..7488cf90a9669d 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
@@ -60,10 +60,11 @@ void test_float(void) {
   vf = __builtin_s390_vfisb(vf, 4, 1);
   // CHECK: call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 5);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vf = __builtin_s390_vfisb(vf, 4, 6);
   // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 7);
   // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
 }
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
index 6a1f8f0e923f65..fe964fa38aee07 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
@@ -303,10 +303,10 @@ void test_float(void) {
   // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundc(vd);
   // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
@@ -316,3 +316,5 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
   vd = vec_round(vd);
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
index 750f5011a26798..e7ea4e325862e9 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
@@ -495,16 +495,16 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
 
   vf = vec_roundz(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vf = vec_trunc(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
 
   vf = vec_roundc(vf);
@@ -541,3 +541,5 @@ void test_float(void) {
   // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
   // CHECK-ASM: vftcidb
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
\ No newline at end of file
diff --git a/clang/test/CodeGen/arm64-vrnd-constrained.c b/clang/test/CodeGen/arm64-vrnd-constrained.c
index ccf729a6a25ef6..e690f26b0def52 100644
--- a/clang/test/CodeGen/arm64-vrnd-constrained.c
+++ b/clang/test/CodeGen/arm64-vrnd-constrained.c
@@ -14,7 +14,7 @@
 float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
 // COMMON-LABEL: rnd5
 // UNCONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double>
-// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-ASM:     frintz.2d v{{[0-9]+}}, v{{[0-9]+}}
 
 float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
@@ -41,3 +41,5 @@ float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
 // CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>
 // CHECK-ASM:     frintx.2d v{{[0-9]+}}, v{{[0-9]+}}
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index 68b9e75283c547..f5136cd18e0eff 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -242,10 +242,10 @@ __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f);
 
   __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);
 
-// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call double @llvm.trunc.f64(double %{{.*}}) #[[ATTR_CALL:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call float @llvm.trunc.f32(float %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call fp128 @llvm.trunc.f128(fp128 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
 };
 
 // CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
@@ -377,10 +377,10 @@ __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f);
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
 
-// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
-// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
+// CHECK: declare double @llvm.trunc.f64(double) #[[ATTR_FUNC:[0-9]+]]
+// CHECK: declare float @llvm.trunc.f32(float) #[[ATTR_FUNC]]
+// CHECK: declare x86_fp80 @llvm.trunc.f80(x86_fp80) #[[ATTR_FUNC]]
+// CHECK: declare fp128 @llvm.trunc.f128(fp128) #[[ATTR_FUNC]]
 
 #pragma STDC FP_CONTRACT ON
 void bar(float f) {
@@ -401,3 +401,6 @@ void bar(float f) {
   // CHECK: fneg
   // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
 };
+
+// CHECK: attributes #[[ATTR_FUNC]] = { {{.*}} memory(none) }
+// CHECK: attributes #[[ATTR_CALL]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 677b59e0c8fbeb..9dc831ef23273d 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -721,6 +721,7 @@ END_TWO_BYTE_PACK()
       case ISD::STRICT_FP_TO_BF16:
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
         return true;
     }
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 6a41094ff933b0..7ccaf9558077c0 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1324,6 +1324,7 @@ class TargetLoweringBase {
       default: llvm_unreachable("Unexpected FP pseudo-opcode");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/include/llvm/IR/ConstrainedOps.def b/llvm/include/llvm/IR/ConstrainedOps.def
index 30a82bf633d575..2b1bb711444a06 100644
--- a/llvm/include/llvm/IR/ConstrainedOps.def
+++ b/llvm/include/llvm/IR/ConstrainedOps.def
@@ -39,6 +39,11 @@
 #define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
 #endif
 
+// Intrinsic functions that had a constrained variant.
+#ifndef LEGACY_FUNCTION
+#define LEGACY_FUNCTION(N,A,R,I,D)
+#endif
+
 // Arguments of the entries are:
 // - instruction or intrinsic function name.
 // - Number of original instruction/intrinsic arguments.
@@ -103,7 +108,7 @@ DAG_FUNCTION(sinh,            1, 1, experimental_constrained_sinh,       FSINH)
 DAG_FUNCTION(sqrt,            1, 1, experimental_constrained_sqrt,       FSQRT)
 DAG_FUNCTION(tan,             1, 1, experimental_constrained_tan,        FTAN)
 DAG_FUNCTION(tanh,            1, 1, experimental_constrained_tanh,       FTANH)
-DAG_FUNCTION(trunc,           1, 0, experimental_constrained_trunc,      FTRUNC)
+LEGACY_FUNCTION(trunc,        1, 0, experimental_constrained_trunc,      FTRUNC)
 
 // This is definition for fmuladd intrinsic function, that is converted into
 // constrained FMA or FMUL + FADD intrinsics.
@@ -114,3 +119,4 @@ FUNCTION(fmuladd,         3, 1, experimental_constrained_fmuladd)
 #undef CMP_INSTRUCTION
 #undef DAG_INSTRUCTION
 #undef DAG_FUNCTION
+#undef LEGACY_FUNCTION
\ No newline at end of file
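
(As the SelectionDAG changes in this patch show, a consumer that wants the
legacy-constrained entries handled like the other DAG instructions forwards
the new macro before including this file:

    #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)           \
      case ISD::STRICT_##DAGN:
    #define LEGACY_FUNCTION DAG_INSTRUCTION
    #include "llvm/IR/ConstrainedOps.def"

LEGACY_FUNCTION expands to nothing by default, so consumers that do not
define it are unaffected.)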
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index e7afcbd31420c1..076a28519491ff 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -263,7 +263,7 @@ class LLVM_ABI Function : public GlobalObject, public ilist_node<Function> {
   /// Returns true if the function is one of the "Constrained Floating-Point
   /// Intrinsics". Returns false if not, and returns false when
   /// getIntrinsicID() returns Intrinsic::not_intrinsic.
-  bool isConstrainedFPIntrinsic() const;
+  bool isLegacyConstrainedIntrinsic() const;
 
   /// Update internal caches that depend on the function name (such as the
   /// intrinsic ID and libcall cache).
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 2cc6c0359bf7ad..aaa07215028e19 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -2145,6 +2145,9 @@ class CallBase : public Instruction {
   /// Return exception behavior specified by operand bundles.
   std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
 
+  /// Does the called function access the floating-point environment?
+  bool isConstrained() const;
+
   /// Used to keep track of an operand bundle.  See the main comment on
   /// OperandBundleUser above.
   struct BundleOpInfo {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index a248a9612a82d0..2f382ed6b0ad4d 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -146,6 +146,18 @@ class IntrinsicInst : public CallInst {
 std::optional<RoundingMode> getRoundingModeArg(const CallBase &I);
 std::optional<fp::ExceptionBehavior> getExceptionBehaviorArg(const CallBase &I);
 
+/// Return true if the argument specifies an intrinsic that had a constrained
+/// variant (like 'trunc.f32').
+bool hadConstrainedVariant(StringRef Name);
+
+/// If the given string specifies some legacy constrained intrinsic (like
+/// 'llvm.experimental.constrained.trunc.f32'), return the corresponding ID
+/// (like 'Intrinsic::trunc') and the number of FP metadata arguments.
+///
+/// \param Name Intrinsic name without prefix 'llvm.experimental.constrained'
+///             (like 'trunc.f32').
+std::pair<Intrinsic::ID, unsigned> getIntrinsicForConstrained(StringRef Name);
+
 /// Check if \p ID corresponds to a lifetime intrinsic.
 static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
   switch (ID) {
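
A sketch of the intended use, with the values ConstrainedOps.def gives for
'trunc' (no rounding argument, so R == 0 and the result is 1 + R == 1):

    auto [ID, NumMetadataArgs] = getIntrinsicForConstrained("trunc.f32");
    // ID == Intrinsic::trunc, NumMetadataArgs == 1 (the trailing
    // exception-behavior metadata argument that has to be dropped).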
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index 89dfff256e0c43..c867a944ccc9b1 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -125,9 +125,10 @@ namespace Intrinsic {
   /// Map a MS builtin name to an intrinsic ID.
   ID getIntrinsicForMSBuiltin(StringRef TargetPrefix, StringRef BuiltinName);
 
-  /// Returns true if the intrinsic ID is for one of the "Constrained
-  /// Floating-Point Intrinsics".
-  bool isConstrainedFPIntrinsic(ID QID);
+  /// Returns true if the intrinsic ID is for one of the legacy constrained
+  /// floating-point intrinsics, which use metadata arguments to represent
+  /// floating-point options.
+  bool isLegacyConstrainedIntrinsic(ID QID);
 
   /// Returns true if the intrinsic ID is for one of the "Constrained
   /// Floating-Point Intrinsics" that take rounding mode metadata.
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 1ca8c2565ab0b6..8d192b0d5cfe00 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1352,9 +1352,6 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in
   def int_experimental_constrained_roundeven : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                          [ LLVMMatchType<0>,
                                                            llvm_metadata_ty ]>;
-  def int_experimental_constrained_trunc : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
-                                                     [ LLVMMatchType<0>,
-                                                       llvm_metadata_ty ]>;
 
   // Constrained floating-point comparison (quiet and signaling variants).
   // Third operand is the predicate represented as a metadata string.
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 1971c28fc4c4de..6bb86048694acf 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1706,7 +1706,6 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
   case Intrinsic::experimental_constrained_roundeven:
-  case Intrinsic::experimental_constrained_trunc:
   case Intrinsic::experimental_constrained_nearbyint:
   case Intrinsic::experimental_constrained_rint:
   case Intrinsic::experimental_constrained_fcmp:
@@ -2142,8 +2141,11 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
   }
 
   if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
+    auto EB = Call->getExceptionBehavior();
+    APFloat U = Op->getValueAPF();
+
     if (IntrinsicID == Intrinsic::convert_to_fp16) {
-      APFloat Val(Op->getValueAPF());
+      APFloat Val(U);
 
       bool lost = false;
       Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
@@ -2151,8 +2153,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
       return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
     }
 
-    APFloat U = Op->getValueAPF();
-
     if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
         IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
       bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
@@ -2231,6 +2231,8 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     }
 
     if (IntrinsicID == Intrinsic::trunc) {
+      if (U.isSignaling() && EB && *EB != fp::ebIgnore)
+        return nullptr;
       U.roundToIntegral(APFloat::rmTowardZero);
       return ConstantFP::get(Ty->getContext(), U);
     }
@@ -2277,9 +2279,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     case Intrinsic::experimental_constrained_floor:
       RM = APFloat::rmTowardNegative;
       break;
-    case Intrinsic::experimental_constrained_trunc:
-      RM = APFloat::rmTowardZero;
-      break;
     }
     if (RM) {
       auto CI = cast<ConstrainedFPIntrinsic>(Call);
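
The effect of the new signaling-NaN check, as a sketch (the NaN bit pattern
is chosen for illustration only):

    %r = call double @llvm.trunc.f64(double 0x7FF4000000000000) [ "fpe.except"(metadata !"strict") ]

Without an exception bundle the call folds to a NaN constant as before;
with "strict" or "maytrap" behavior the folder now leaves it alone, since
truncating a signaling NaN may raise an invalid-operation exception.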
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 65ef6c8b291165..5367e922b0082e 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6342,10 +6342,12 @@ void LLParser::updateConstrainedIntrinsic(
     return StringRef();
   };
 
+  unsigned NumMetadataArgs = 0;
   if (Args.size() > 1) {
     Value *V = Args[Args.size() - 2].V;
     StringRef VStr = getMetadataArgumentValue(V);
     if (!VStr.empty()) {
+      NumMetadataArgs++;
       if (auto RM = convertStrToRoundingMode(VStr))
         addFPRoundingBundle(Context, Bundles, *RM);
     }
@@ -6354,10 +6356,17 @@ void LLParser::updateConstrainedIntrinsic(
   Value *V = Args.back().V;
   StringRef VStr = getMetadataArgumentValue(V);
   if (!VStr.empty()) {
+    NumMetadataArgs++;
     if (auto EB = convertStrToExceptionBehavior(VStr))
       addFPExceptionBundle(Context, Bundles, *EB);
   }
 
+  if (hadConstrainedVariant(Name)) {
+    Args.pop_back_n(NumMetadataArgs);
+    CalleeID.StrVal = "llvm." + Name.str();
+  }
+
+  FnAttrs.addAttribute(Attribute::StrictFP);
   MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
   FnAttrs.addAttribute(Attribute::getWithMemoryEffects(Context, ME));
   FnAttrs.addAttribute(Attribute::StrictFP);
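
The textual-IR effect matches the auto-upgrade test later in this patch: a
call written as

    %res = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict")

is parsed as a call to @llvm.trunc.f32, with the metadata argument dropped
and an equivalent "fpe.except" bundle attached.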
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 5ca223852cbde3..5c81c61f1ab278 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -331,7 +331,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
     Function *Fn = Intrinsic::getOrInsertDeclaration(
         VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
     Value *NewOp;
-    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
+    if (Intrinsic::isLegacyConstrainedIntrinsic(UnpredicatedIntrinsicID))
       NewOp =
           Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
     else
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index f668e41094bbc8..c4e6042d2a791a 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2155,6 +2155,12 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     }
   }
 
+  // Process constrained intrinsics in a way compatible with the pre-bundle
+  // implementation.
+  if (CI.isConstrained() &&
+      !Intrinsic::isLegacyConstrainedIntrinsic(CI.getIntrinsicID()))
+    return false;
+
   // If this is a simple intrinsic (that is, we just need to add a def of
   // a vreg, and uses for each arg operand, then translate it.
   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index db21e708970648..b984e6dc491f3d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -314,6 +314,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ValVT = Node->getValueType(0);
     if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
@@ -1151,6 +1152,7 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ExpandStrictFPOp(Node, Results);
     return;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 465128099f4447..1ff3dc2bcdb8ad 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -199,6 +199,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     R = ScalarizeVecRes_StrictFPOp(N);
     break;
@@ -1337,6 +1338,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     SplitVecRes_StrictFPOp(N, Lo, Hi);
     break;
@@ -4639,6 +4641,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     Res = WidenVecRes_StrictFP(N);
     break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 182529123ec6d8..f7a9b351b43f55 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -11020,6 +11020,7 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a38a3e9b91052d..15c801a74dbc89 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6462,6 +6462,11 @@ void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
 /// Lower the call to the specified intrinsic function.
 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                              unsigned Intrinsic) {
+  if (I.isConstrained()) {
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
+    return;
+  }
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDLoc sdl = getCurSDLoc();
   DebugLoc dl = getCurDebugLoc();
@@ -7022,7 +7027,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
 #include "llvm/IR/ConstrainedOps.def"
-    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
     return;
 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
 #include "llvm/IR/VPIntrinsics.def"
@@ -8290,7 +8295,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 }
 
 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
-    const ConstrainedFPIntrinsic &FPI) {
+    const IntrinsicInst &FPI) {
   SDLoc sdl = getCurSDLoc();
 
   // We do not need to serialize constrained FP intrinsics against
@@ -8299,7 +8304,13 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   SDValue Chain = DAG.getRoot();
   SmallVector<SDValue, 4> Opers;
   Opers.push_back(Chain);
-  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
+
+  Intrinsic::ID ID = FPI.getIntrinsicID();
+  bool IsLegacy = Intrinsic::isLegacyConstrainedIntrinsic(ID);
+  unsigned NumArgs = IsLegacy ? static_cast<const ConstrainedFPIntrinsic &>(FPI)
+                                    .getNonMetadataArgCount()
+                              : FPI.arg_size();
+  for (unsigned I = 0; I != NumArgs; ++I)
     Opers.push_back(getValue(FPI.getArgOperand(I)));
 
   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
@@ -8347,6 +8358,8 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   case Intrinsic::INTRINSIC:                                                   \
     Opcode = ISD::STRICT_##DAGN;                                               \
     break;
+#define LEGACY_FUNCTION(NAME, NARG, ROUND_MODE, I, DAGN)                       \
+  DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, NAME, DAGN)
 #include "llvm/IR/ConstrainedOps.def"
   case Intrinsic::experimental_constrained_fmuladd: {
     Opcode = ISD::STRICT_FMA;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 3a8dc25e98700e..8c0b8a667357c1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -626,7 +626,7 @@ class SelectionDAGBuilder {
                                DebugLoc DbgLoc);
   void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
   void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
-  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
+  void visitConstrainedFPIntrinsic(const IntrinsicInst &FPI);
   void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
   void visitVectorHistogram(const CallInst &I, unsigned IntrinsicID);
   void visitVectorExtractLastActive(const CallInst &I, unsigned Intrinsic);
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 392cfbdd21273d..4275318a7e0b13 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -791,7 +791,8 @@ void TargetLoweringBase::initActions() {
 
       // Constrained floating-point operations default to expand.
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
-    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+  setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
 
     // For most targets @llvm.get.dynamic.area.offset just returns 0.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 3e30fa3d10ac91..6185e4c5a8958b 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/AutoUpgrade.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/BinaryFormat/Dwarf.h"
@@ -1193,8 +1194,31 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
           F->getParent(), ID, F->getFunctionType()->getReturnType());
       return true;
     }
-    if (Name.starts_with("experimental.constrained."))
+    if (Name.consume_front("experimental.constrained.")) {
+      Name = Name.take_while(
+          [](char Ch) -> bool { return isAlnum(Ch) || Ch == '_'; });
+      auto [NewID, NumMetadataArgs] = getIntrinsicForConstrained(Name);
+      if (NewID != Intrinsic::not_intrinsic) {
+        auto *OldTy = cast<FunctionType>(F->getFunctionType());
+        SmallVector<Type *, 4> ParamTys;
+        for (unsigned i = 0, e = OldTy->getNumParams() - NumMetadataArgs;
+             i != e; ++i) {
+          ParamTys.push_back(OldTy->getParamType(i));
+        }
+        auto *NewTy =
+            FunctionType::get(OldTy->getReturnType(), ParamTys, false);
+
+        SmallVector<Type *> OverloadTys;
+        bool Success =
+            Intrinsic::getIntrinsicSignature(NewID, NewTy, OverloadTys);
+        (void)Success;
+        assert(Success && "cannot get intrinsic signature");
+
+        NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), NewID,
+                                                  OverloadTys);
+      }
       return true;
+    }
     break; // No other 'e*'.
   case 'f':
     if (Name.starts_with("flt.rounds")) {
@@ -4333,16 +4357,12 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
     return nullptr;
 
   SmallVector<OperandBundleDef, 2> NewBundles;
-
-  auto RM = getRoundingModeArg(*CB);
-  if (RM) {
+  if (auto RM = getRoundingModeArg(*CB)) {
     auto CurrentRM = CB->getRoundingMode();
     assert(!CurrentRM && "unexpected rounding bundle");
     Builder.createFPRoundingBundle(NewBundles, RM);
   }
-
-  auto EB = getExceptionBehaviorArg(*CB);
-  if (EB) {
+  if (auto EB = getExceptionBehaviorArg(*CB)) {
     auto CurrentEB = CB->getExceptionBehavior();
     assert(!CurrentEB && "unexpected exception bundle");
     Builder.createFPExceptionBundle(NewBundles, EB);
@@ -4936,6 +4956,44 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  case Intrinsic::NAME:
+#include "llvm/IR/ConstrainedOps.def"
+  {
+    SmallVector<OperandBundleDef, 2> Bundles;
+    unsigned NumMetadataArgs = 0;
+
+    if (auto RM = getRoundingModeArg(*CI)) {
+      auto CurrentRM = CI->getRoundingMode();
+      assert(!CurrentRM && "unexpected rounding bundle");
+      Builder.createFPRoundingBundle(Bundles, RM);
+      ++NumMetadataArgs;
+    }
+
+    if (auto EB = getExceptionBehaviorArg(*CI)) {
+      auto CurrentEB = CI->getExceptionBehavior();
+      assert(!CurrentEB && "unexpected exception bundle");
+      Builder.createFPExceptionBundle(Bundles, EB);
+      ++NumMetadataArgs;
+    }
+
+    SmallVector<Value *, 4> Args(CI->args());
+    Args.pop_back_n(NumMetadataArgs);
+    NewCall = Builder.CreateCall(NewFn, Args, Bundles, CI->getName());
+    NewCall->copyMetadata(*CI);
+    AttributeList Attrs = CI->getAttributes();
+    NewCall->setAttributes(Attrs);
+    if (isa<FPMathOperator>(CI)) {
+      FastMathFlags FMF = CI->getFastMathFlags();
+      NewCall->setFastMathFlags(FMF);
+    }
+
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(CI->getContext(), ME);
+    NewCall->addFnAttr(A);
+    NewCall->addFnAttr(Attribute::StrictFP);
+    break;
+  }
   }
   assert(NewCall && "Should have either set this variable or returned through "
                     "the default case");
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index 9c5dd5aeb92e97..d6c29e27a24d6f 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -554,8 +554,8 @@ static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
   return MutableArrayRef<Argument>(Args, Count);
 }
 
-bool Function::isConstrainedFPIntrinsic() const {
-  return Intrinsic::isConstrainedFPIntrinsic(getIntrinsicID());
+bool Function::isLegacyConstrainedIntrinsic() const {
+  return Intrinsic::isLegacyConstrainedIntrinsic(getIntrinsicID());
 }
 
 void Function::clearArguments() {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index f763a29e90a97f..6d3c360fdbd59a 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -622,6 +622,11 @@ std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
   return std::nullopt;
 }
 
+bool CallBase::isConstrained() const {
+  return getOperandBundle(LLVMContext::OB_fpe_control) ||
+         getOperandBundle(LLVMContext::OB_fpe_except);
+}
+
 MemoryEffects CallBase::getMemoryEffects() const {
   MemoryEffects ME = getAttributes().getMemoryEffects();
   if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index a1f8533fe8773b..262aebc4e94c65 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -21,6 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugInfoMetadata.h"
@@ -69,6 +70,7 @@ bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
 bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
   switch (IID) {
 #define FUNCTION(NAME, A, R, I) case Intrinsic::NAME:
+#define LEGACY_FUNCTION(NAME, A, R, I, N) case Intrinsic::NAME:
 #include "llvm/IR/ConstrainedOps.def"
     return true;
   default:
@@ -78,6 +80,8 @@ bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
 
 std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
   unsigned NumOperands = I.arg_size();
+  if (NumOperands <= 2)
+    return std::nullopt;
   Metadata *MD = nullptr;
   auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 2));
   if (MAV)
@@ -90,6 +94,8 @@ std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
 std::optional<fp::ExceptionBehavior>
 llvm::getExceptionBehaviorArg(const CallBase &I) {
   unsigned NumOperands = I.arg_size();
+  if (NumOperands <= 1)
+    return std::nullopt;
   Metadata *MD = nullptr;
   auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 1));
   if (MAV)
@@ -99,6 +105,30 @@ llvm::getExceptionBehaviorArg(const CallBase &I) {
   return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
 }
 
+bool llvm::hadConstrainedVariant(StringRef Name) {
+  size_t period_pos = Name.find('.');
+  if (period_pos != StringRef::npos)
+    Name = Name.take_front(period_pos);
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  if (Name == #NAME)                                                           \
+    return true;
+#include "llvm/IR/ConstrainedOps.def"
+  return false;
+}
+
+std::pair<Intrinsic::ID, unsigned>
+llvm::getIntrinsicForConstrained(StringRef Name) {
+  size_t period_pos = Name.find('.');
+  if (period_pos != StringRef::npos)
+    Name = Name.take_front(period_pos);
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  if (Name == #NAME)                                                           \
+    return std::make_pair(Intrinsic::NAME, 1 + R);
+#include "llvm/IR/ConstrainedOps.def"
+
+  return std::make_pair(Intrinsic::not_intrinsic, 0);
+}
+
 //===----------------------------------------------------------------------===//
 /// DbgVariableIntrinsic - This is the common base class for debug info
 /// intrinsics for variables.
@@ -364,7 +394,7 @@ unsigned ConstrainedFPIntrinsic::getNonMetadataArgCount() const {
 }
 
 bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
-  return Intrinsic::isConstrainedFPIntrinsic(I->getIntrinsicID());
+  return Intrinsic::isLegacyConstrainedIntrinsic(I->getIntrinsicID());
 }
 
 ElementCount VPIntrinsic::getStaticVectorLength() const {
diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp
index 3130a0bd2955a5..168b98de2fb66f 100644
--- a/llvm/lib/IR/Intrinsics.cpp
+++ b/llvm/lib/IR/Intrinsics.cpp
@@ -741,7 +741,7 @@ Function *Intrinsic::getDeclarationIfExists(Module *M, ID id,
 #include "llvm/IR/IntrinsicImpl.inc"
 #undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
 
-bool Intrinsic::isConstrainedFPIntrinsic(ID QID) {
+bool Intrinsic::isLegacyConstrainedIntrinsic(ID QID) {
   switch (QID) {
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index cdc3f0308fe59c..238ce74f697805 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -514,10 +514,9 @@ bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
       return false;
     }
 
-    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
-      std::optional<fp::ExceptionBehavior> ExBehavior =
-          FPI->getExceptionBehavior();
-      return *ExBehavior != fp::ebStrict;
+    if (auto *Call = dyn_cast<CallBase>(I)) {
+      if (auto EB = Call->getExceptionBehavior())
+        return *EB != fp::ebStrict;
     }
   }
 
diff --git a/llvm/test/Assembler/fp-intrinsics-attr.ll b/llvm/test/Assembler/fp-intrinsics-attr.ll
index 5b9a44710763e4..176c900465c3c9 100644
--- a/llvm/test/Assembler/fp-intrinsics-attr.ll
+++ b/llvm/test/Assembler/fp-intrinsics-attr.ll
@@ -215,9 +215,7 @@ define void @func(double %a, double %b, double %c, i32 %i) strictfp {
                                                double %a,
                                                metadata !"fpexcept.strict")
 
-  %trunc = call double @llvm.experimental.constrained.trunc.f64(
-                                               double %a,
-                                               metadata !"fpexcept.strict")
+  %trunc = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]
 
   %q1 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                                double %a, double %b,
@@ -368,15 +366,15 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
 ; CHECK: @llvm.experimental.constrained.roundeven.f64({{.*}}) #[[ATTR1]]
 
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-; CHECK: @llvm.experimental.constrained.trunc.f64({{.*}}) #[[ATTR1]]
-
 declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
 ; CHECK: @llvm.experimental.constrained.fcmp.f64({{.*}}) #[[ATTR1]]
 
 declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
 ; CHECK: @llvm.experimental.constrained.fcmps.f64({{.*}}) #[[ATTR1]]
 
+declare double @llvm.trunc.f64(double)
+; CHECK: declare double @llvm.trunc.f64(double) #[[ATTR2:[0-9]+]]
+
 ; CHECK: attributes #[[ATTR0]] = {{{.*}} strictfp {{.*}}}
 ; CHECK: attributes #[[ATTR1]] = { {{.*}} strictfp {{.*}} }
-
+; CHECK: attributes #[[ATTR2]] = { {{.*}} memory(none) }
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll b/llvm/test/Bitcode/auto-upgrade-constrained.ll
index 8e3f2c4ad77896..b857fa1f4a6860 100644
--- a/llvm/test/Bitcode/auto-upgrade-constrained.ll
+++ b/llvm/test/Bitcode/auto-upgrade-constrained.ll
@@ -322,6 +322,6 @@ define float @test_trunc(float %a) strictfp {
   ret float %res
 }
 ; CHECK-LABEL: define float @test_trunc(
-; CHECK: call float @llvm.experimental.constrained.trunc.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+; CHECK: call float @llvm.trunc.f32(float {{.*}}) #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
 
 ; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
index d323a7e677b5aa..742d76638409b8 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
@@ -830,7 +830,7 @@ define half @trunc_f16(half %x) #0 {
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    frintz h0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call half @llvm.experimental.constrained.trunc.f16(half %x, metadata !"fpexcept.strict") #0
+  %val = call half @llvm.trunc.f16(half %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret half %val
 }
 
@@ -1376,7 +1376,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
 declare half @llvm.experimental.constrained.round.f16(half, metadata)
 declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 83e60c10897624..a922a39ee2da3d 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -300,7 +300,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz v0.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  %val = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %val
 }
 
@@ -571,7 +571,7 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz v0.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  %val = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %val
 }
 
@@ -829,7 +829,7 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz d0, d0
 ; CHECK-NEXT:    ret
-  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  %val = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x double> %val
 }
 
@@ -901,7 +901,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
 declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
 
@@ -927,7 +926,6 @@ declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, met
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 
@@ -953,7 +951,6 @@ declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, met
 declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
 declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
 declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
 declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
 declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index f2a14a9b73fa16..539ac690719846 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -765,7 +765,7 @@ define float @trunc_f32(float %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz s0, s0
 ; CHECK-NEXT:    ret
-  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %val
 }
 
@@ -1559,7 +1559,7 @@ define double @trunc_f64(double %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz d0, d0
 ; CHECK-NEXT:    ret
-  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %val
 }
 
@@ -2428,7 +2428,7 @@ define fp128 @trunc_f128(fp128 %x) #0 {
 ; CHECK-NEXT:    bl truncl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %val = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret fp128 %val
 }
 
@@ -3179,7 +3179,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
 declare float @llvm.experimental.constrained.round.f32(float, metadata)
 declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
 
@@ -3231,7 +3230,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
 
@@ -3280,7 +3278,6 @@ declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
 declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
index 93b6a58a22b6ce..797ad8d3734eb0 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
@@ -291,7 +291,7 @@ define float @round_f32(float %x) #0 {
 ; CHECK-SP-NOV8: bl truncf
 ; CHECK-SP-V8: vrintz.f32
 define float @trunc_f32(float %x) #0 {
-  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %val
 }
 
@@ -762,7 +762,7 @@ define double @round_f64(double %x) #0 {
 ; CHECK-DP-NOV8: bl trunc
 ; CHECK-DP-V8: vrintz.f64
 define double @trunc_f64(double %x) #0 {
-  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %val
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
index eac4fb6f98bf7d..379e2d7e9df9be 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -34,11 +34,6 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
-
 define float @ceil_f32(float %f1) strictfp {
 ; P8-LABEL: ceil_f32:
 ; P8:       # %bb.0:
@@ -567,9 +562,7 @@ define float @trunc_f32(float %f1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xsrdpiz f1, f1
 ; P9-NEXT:    blr
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f1,
-                        metadata !"fpexcept.strict")
+  %res = call float @llvm.trunc.f32(float %f1) [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
@@ -583,9 +576,7 @@ define double @trunc_f64(double %f1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xsrdpiz f1, f1
 ; P9-NEXT:    blr
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f1,
-                        metadata !"fpexcept.strict")
+  %res = call double @llvm.trunc.f64(double %f1) [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
@@ -599,9 +590,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %vf1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xvrspiz v2, v2
 ; P9-NEXT:    blr
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                        <4 x float> %vf1,
-                        metadata !"fpexcept.strict")
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %vf1) [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -615,8 +604,6 @@ define <2 x double> @trunc_v2f64(<2 x double> %vf1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xvrdpiz v2, v2
 ; P9-NEXT:    blr
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %vf1,
-                        metadata !"fpexcept.strict")
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %vf1) [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index c1ee436a40c557..55f26d099d59f0 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1061,9 +1061,7 @@ define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64-NEXT:    mtlr 0
 ; PC64-NEXT:    blr
 entry:
-  %trunc = call ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(
-                    ppc_fp128 %first,
-                    metadata !"fpexcept.strict") #1
+  %trunc = call ppc_fp128 @llvm.trunc.ppcf128(ppc_fp128 %first) #1 [ "fpe.except"(metadata !"strict") ]
   ret ppc_fp128 %trunc
 }
 
@@ -2187,7 +2185,6 @@ declare ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128(ppc_fp128, metadat
 declare ppc_fp128 @llvm.experimental.constrained.fsub.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.tan.ppcf128(ppc_fp128, metadata, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.atan2.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata)
-declare ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(ppc_fp128, metadata)
 declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata)
 declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata)
 declare i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128, metadata)
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
index 71c3069a406fe3..f18512347c98cc 100644
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -6767,9 +6767,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(<1 x float> %x) #0 {
 ; PC64LE9-NEXT:    xsrdpiz 1, 1
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %x,
-                               metadata !"fpexcept.strict") #1
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6784,9 +6782,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(<2 x double> %x) #0 {
 ; PC64LE9-NEXT:    xvrdpiz 34, 34
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %x,
-                                metadata !"fpexcept.strict") #1
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6834,9 +6830,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(<3 x float> %x) #0 {
 ; PC64LE9-NEXT:    xxperm 34, 35, 1
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %x,
-                              metadata !"fpexcept.strict") #1
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6857,9 +6851,7 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
 ; PC64LE9-NEXT:    xxswapd 1, 2
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %x,
-                          metadata !"fpexcept.strict") #1
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <3 x double> %trunc
 }
 
@@ -8785,7 +8777,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
@@ -8832,7 +8823,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
@@ -8901,8 +8891,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata)
@@ -8947,7 +8935,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index fddb86de58f518..6da0e5c482ed57 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -1552,8 +1552,6 @@ define double @ceil_f64(double %a) nounwind strictfp {
   ret double %1
 }
 
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-
 define double @trunc_f64(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: trunc_f64:
 ; RV32IFD:       # %bb.0:
@@ -1608,7 +1606,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret double %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index 8b883f781c9d9b..63f84b55233984 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -1517,8 +1517,6 @@ define float @ceil_f32(float %a) nounwind strictfp {
   ret float %1
 }
 
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-
 define float @trunc_f32(float %a) nounwind strictfp {
 ; RV32IF-LABEL: trunc_f32:
 ; RV32IF:       # %bb.0:
@@ -1573,7 +1571,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.trunc.f32(float %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret float %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
index 2173887e854178..a5641d47e51fea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
@@ -20,10 +20,9 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict")
+  %a = call <1 x half> @llvm.trunc.v1f16(<1 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x half> %a
 }
-declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata)
 
 define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f16:
@@ -41,10 +40,9 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict")
+  %a = call <2 x half> @llvm.trunc.v2f16(<2 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x half> %a
 }
-declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata)
 
 define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f16:
@@ -62,10 +60,9 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict")
+  %a = call <4 x half> @llvm.trunc.v4f16(<4 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x half> %a
 }
-declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata)
 
 define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f16:
@@ -83,10 +80,9 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict")
+  %a = call <8 x half> @llvm.trunc.v8f16(<8 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x half> %a
 }
-declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata)
 
 define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f16:
@@ -104,10 +100,9 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict")
+  %a = call <16 x half> @llvm.trunc.v16f16(<16 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <16 x half> %a
 }
-declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
 
 define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v32f16:
@@ -126,10 +121,9 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict")
+  %a = call <32 x half> @llvm.trunc.v32f16(<32 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <32 x half> %a
 }
-declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 
 define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f32:
@@ -147,10 +141,9 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict")
+  %a = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %a
 }
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 
 define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f32:
@@ -168,10 +161,9 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict")
+  %a = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x float> %a
 }
-declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata)
 
 define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f32:
@@ -189,10 +181,9 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict")
+  %a = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %a
 }
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 
 define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f32:
@@ -210,10 +201,9 @@ define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict")
+  %a = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x float> %a
 }
-declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
 
 define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f32:
@@ -231,10 +221,9 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict")
+  %a = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <16 x float> %a
 }
-declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
 
 define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f64:
@@ -252,10 +241,9 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
+  %a = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x double> %a
 }
-declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
 
 define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f64:
@@ -273,10 +261,9 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
+  %a = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %a
 }
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
 define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f64:
@@ -294,10 +281,9 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
+  %a = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x double> %a
 }
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 
 define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f64:
@@ -315,7 +301,6 @@ define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
+  %a = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x double> %a
 }
-declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
index 8a5f118d8f6acc..d1ace747e043e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
@@ -20,10 +20,9 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x half> %a
 }
-declare <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half>, metadata)
 
 define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f16:
@@ -41,10 +40,9 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x half> %a
 }
-declare <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half>, metadata)
 
 define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f16:
@@ -62,10 +60,9 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x half> %a
 }
-declare <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half>, metadata)
 
 define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f16:
@@ -83,10 +80,9 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x half> %a
 }
-declare <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half>, metadata)
 
 define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f16:
@@ -104,10 +100,9 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 16 x half> %a
 }
-declare <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half>, metadata)
 
 define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv32f16:
@@ -125,10 +120,9 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 32 x half> %a
 }
-declare <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half>, metadata)
 
 define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f32:
@@ -146,10 +140,9 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x float> %a
 }
-declare <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float>, metadata)
 
 define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f32:
@@ -167,10 +160,9 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x float> %a
 }
-declare <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float>, metadata)
 
 define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f32:
@@ -188,10 +180,9 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x float> %a
 }
-declare <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float>, metadata)
 
 define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f32:
@@ -209,10 +200,9 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x float> @llvm.trunc.nxv8f32(<vscale x 8 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x float> %a
 }
-declare <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float>, metadata)
 
 define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f32:
@@ -230,10 +220,9 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 16 x float> %a
 }
-declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata)
 
 define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f64:
@@ -251,10 +240,9 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x double> %a
 }
-declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata)
 
 define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f64:
@@ -272,10 +260,9 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x double> %a
 }
-declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata)
 
 define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f64:
@@ -293,10 +280,9 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x double> %a
 }
-declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata)
 
 define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f64:
@@ -314,7 +300,6 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x double> %a
 }
-declare <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
index 3efa9e58e65d3d..f8046674754d5f 100644
--- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
@@ -204,8 +204,6 @@ define half @ceil_f16(half %a) nounwind strictfp {
   ret half %1
 }
 
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
-
 define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFH-LABEL: trunc_f16:
 ; RV32IZFH:       # %bb.0:
@@ -272,7 +270,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZDINXZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret half %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
index 214ea46d3130d6..de0394a9625926 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
@@ -216,8 +216,6 @@ define half @ceil_f16(half %a) nounwind strictfp {
   ret half %1
 }
 
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
-
 define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-LABEL: trunc_f16:
 ; RV32IZFHMIN:       # %bb.0:
@@ -284,7 +282,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV64IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret half %1
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
index 1fbb1790c01dc0..3f031745c2b61e 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
@@ -161,39 +161,30 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: brasl %r14, truncf at PLT
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: brasl %r14, trunc at PLT
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: brasl %r14, truncl at PLT
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
index bc304a3fb95fb0..8f56f552661fda 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
@@ -165,39 +165,30 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: fiebra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: fidbra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: fixbra %f0, 5, %f0, 4
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
index 2cdff7d5c425ec..df207c6b01a58b 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
@@ -169,31 +169,24 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: fiebra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: fidbra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
@@ -201,9 +194,7 @@ define void @f15(ptr %ptr) #0 {
 ; CHECK: vst [[RES]], 0(%r2)
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
index b82cb8082b7b8c..a24a2d9f791930 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
@@ -6,13 +6,11 @@ declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadat
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 
 define <2 x double> @f1(<2 x double> %val) #0 {
@@ -61,9 +59,7 @@ define <2 x double> @f5(<2 x double> %val) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK: vfidb %v24, %v24, 4, 5
 ; CHECK: br %r14
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %val,
-                        metadata !"fpexcept.strict") #0
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %val) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
 
@@ -129,9 +125,7 @@ define double @f11(<2 x double> %val) #0 {
 ; CHECK: wfidb %f0, %v24, 4, 5
 ; CHECK: br %r14
   %scalar = extractelement <2 x double> %val, i32 0
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %scalar,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %scalar) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
index 701dd5b2302f2c..6db7d03cb82e2d 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
@@ -6,13 +6,11 @@ declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.floor.f32(float, metadata)
 declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 declare float @llvm.experimental.constrained.round.f32(float, metadata)
 declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 
 define <4 x float> @f1(<4 x float> %val) #0 {
@@ -61,9 +59,7 @@ define <4 x float> @f5(<4 x float> %val) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK: vfisb %v24, %v24, 4, 5
 ; CHECK: br %r14
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                        <4 x float> %val,
-                        metadata !"fpexcept.strict") #0
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %val) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -128,9 +124,7 @@ define float @f11(<4 x float> %val) #0 {
 ; CHECK: wfisb %f0, %v24, 4, 5
 ; CHECK: br %r14
   %scalar = extractelement <4 x float> %val, i32 0
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %scalar,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %scalar) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index b08f0e5a74d569..74afe4c6ae4b88 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -6071,9 +6071,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <1 x float>, ptr %a
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %b,
-                               metadata !"fpexcept.strict") #0
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6108,9 +6106,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <2 x double>, ptr %a
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %b,
-                                metadata !"fpexcept.strict") #0
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6163,9 +6159,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x float>, ptr %a
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %b,
-                              metadata !"fpexcept.strict") #0
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6215,9 +6209,7 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, ptr %a
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %b,
-                          metadata !"fpexcept.strict") #0
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   store <3 x double> %trunc, ptr %a
   ret void
 }
@@ -6953,7 +6945,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
 declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.fsub.v1f32(<1 x float>, <1 x float>, metadata, metadata)
@@ -6981,7 +6972,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 
 declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata)
@@ -7033,8 +7023,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 
 declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
@@ -7062,4 +7050,3 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
index 3b9798a2af5820..87aab3f9ad9c56 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
@@ -7,7 +7,6 @@
 
 declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
 declare half @llvm.experimental.constrained.floor.f16(half, metadata)
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
 declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
@@ -122,8 +121,7 @@ define half @ftrunc32(half %f) #0 {
 ; X64:       # %bb.0:
 ; X64-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
-  %res = call half @llvm.experimental.constrained.trunc.f16(
-                        half %f, metadata !"fpexcept.strict") #0
+  %res = call half @llvm.trunc.f16(half %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret half %res
 }
 
@@ -273,7 +271,7 @@ define half @fround16(half %f) #0 {
 ; X86-LABEL: fround16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)
 ; X86-NEXT:    calll roundf
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
index 13f890ae6e191a..7235f0a95ef345 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
@@ -10,8 +10,6 @@ declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
 declare float @llvm.experimental.constrained.floor.f32(float, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
@@ -245,8 +243,7 @@ define float @ftrunc32(float %f) #0 {
 ; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f, metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
@@ -298,8 +295,7 @@ define double @ftruncf64(double %f) #0 {
 ; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vroundsd $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f, metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 5263e0d4f6f39f..0705ee01aa380a 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -1407,7 +1407,7 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl $4
 entry:
-  %trunc = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %trunc = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret fp128 %trunc
 }
 
@@ -1993,7 +1993,6 @@ declare fp128 @llvm.experimental.constrained.atan.f128(fp128, metadata, metadata
 declare fp128 @llvm.experimental.constrained.atan2.f128(fp128, fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.tanh.f128(fp128, metadata, metadata)
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
 declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
diff --git a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
index 8bbc6247dbafd6..a612c6a80e31e9 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
@@ -729,7 +729,7 @@ define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
 ; X64-NEXT:    addq $24, %rsp
 ; X64-NEXT:    retq
 entry:
-  %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
+  %trunc = call x86_fp80 @llvm.trunc.f80(x86_fp80 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret x86_fp80 %trunc
 }
 
@@ -862,7 +862,6 @@ declare x86_fp80 @llvm.experimental.constrained.atan.f80(x86_fp80, metadata, met
 declare x86_fp80 @llvm.experimental.constrained.atan2.f80(x86_fp80, x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.tanh.f80(x86_fp80, metadata, metadata)
-declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata)
 declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata)
diff --git a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
index a2e02508327c81..e9f6cf3de8ad48 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
@@ -14,7 +14,6 @@ declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x doubl
 declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
 declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
 declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
-declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
 declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata)
 declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)
 
@@ -160,8 +159,7 @@ define <16 x half> @ftruncv16f16(<16 x half> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleph $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(
-                          <16 x half> %f, metadata !"fpexcept.strict") #0
+  %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <16 x half> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll
index 5945e6c1bc66eb..d89996db74288f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256.ll
@@ -22,8 +22,6 @@ declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metad
 declare <4 x double>  @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
-declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
 declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
@@ -234,8 +232,7 @@ define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundps $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
-                          <8 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x float> %res
 }
 
@@ -244,8 +241,7 @@ define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundpd $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
-                        <4 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
index dfbc11a43d3d7c..dac0195543ac7f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
@@ -14,7 +14,6 @@ declare <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x f
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata)
-declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.rint.v32f16(<32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata)
 
@@ -155,7 +154,7 @@ define <32 x half> @strict_vector_ftrunc_v32f16(<32 x half> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleph $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
+  %res = call <32 x half> @llvm.trunc.v32f16(<32 x half> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <32 x half> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-512.ll b/llvm/test/CodeGen/X86/vec-strict-512.ll
index 2cafd74af49538..43abaaa3d7d8ff 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512.ll
@@ -20,8 +20,6 @@ declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, me
 declare <8 x double>  @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
 declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata)
-declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
-declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float>, metadata, metadata)
 declare <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double>, metadata, metadata)
 declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata)
@@ -227,7 +225,7 @@ define <16 x float> @strict_vector_ftrunc_v16f32(<16 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <16 x float> %res
 }
 
@@ -236,7 +234,7 @@ define <8 x double> @strict_vector_ftrunc_v8f64(<8 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-round-128.ll b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
index 1f7507cc02bc59..403731057618e6 100644
--- a/llvm/test/CodeGen/X86/vec-strict-round-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
@@ -10,8 +10,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
 declare <2 x double>  @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
@@ -87,8 +85,7 @@ define <4 x float> @ftruncv4f32(<4 x float> %f) #0 {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundps $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                          <4 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -102,8 +99,7 @@ define <2 x double> @ftruncv2f64(<2 x double> %f) #0 {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundpd $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 49062eaef31887..e7bb0744b86d46 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -6372,9 +6372,7 @@ define <1 x float> @constrained_vector_trunc_v1f32_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <1 x float>, ptr %a
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %b,
-                               metadata !"fpexcept.strict") #0
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6403,9 +6401,7 @@ define <2 x double> @constrained_vector_trunc_v2f64_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <2 x double>, ptr %a
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %b,
-                                metadata !"fpexcept.strict") #0
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6446,9 +6442,7 @@ define <3 x float> @constrained_vector_trunc_v3f32_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <3 x float>, ptr %a
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %b,
-                              metadata !"fpexcept.strict") #0
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6490,9 +6484,7 @@ define <3 x double> @constrained_vector_trunc_v3f64_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <3 x double>, ptr %a
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %b,
-                          metadata !"fpexcept.strict") #0
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x double> %trunc
 }
 
@@ -9975,7 +9967,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
@@ -10025,7 +10016,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
@@ -10104,8 +10094,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata)
@@ -10156,7 +10144,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
diff --git a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
index a9ef7f6a765d19..64d8e1d2454db2 100644
--- a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
+++ b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
@@ -17,7 +17,7 @@ entry:
 define double @floor_02() #0 {
 ; CHECK-LABEL: @floor_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -40,7 +40,7 @@ entry:
 define double @ceil_02() #0 {
 ; CHECK-LABEL: @ceil_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -55,7 +55,7 @@ define double @trunc_01() #0 {
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
-  %result = call double @llvm.experimental.constrained.trunc.f64(double 1.010000e+01, metadata !"fpexcept.ignore") #0
+  %result = call double @llvm.trunc.f64(double 1.010000e+01) #0 [ "fpe.except"(metadata !"ignore") ]
   ret double %result
 }
 
@@ -63,7 +63,7 @@ entry:
 define double @trunc_02() #0 {
 ; CHECK-LABEL: @trunc_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double -1.010000e+01) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -86,7 +86,7 @@ entry:
 define double @round_02() #0 {
 ; CHECK-LABEL: @round_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -120,7 +120,7 @@ entry:
 define double @nearbyint_03() #0 {
 ; CHECK-LABEL: @nearbyint_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -132,7 +132,7 @@ entry:
 define double @nearbyint_04() #0 {
 ; CHECK-LABEL: @nearbyint_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -144,7 +144,7 @@ entry:
 define double @nearbyint_05() #0 {
 ; CHECK-LABEL: @nearbyint_05(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -156,7 +156,7 @@ entry:
 define double @nonfinite_01() #0 {
 ; CHECK-LABEL: @nonfinite_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF4000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF4000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -168,10 +168,10 @@ entry:
 define double @nonfinite_02() #0 {
 ; CHECK-LABEL: @nonfinite_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret double 0x7FF8000000000000
+; CHECK-NEXT:    ret double 0x7FFC000000000000
 ;
 entry:
-  %result = call double @llvm.experimental.constrained.trunc.f64(double 0x7ff4000000000000, metadata !"fpexcept.ignore") #0
+  %result = call double @llvm.trunc.f64(double 0x7ff4000000000000) #0 [ "fpe.except"(metadata !"ignore") ]
   ret double %result
 }
 
@@ -179,7 +179,7 @@ entry:
 define double @nonfinite_03() #0 {
 ; CHECK-LABEL: @nonfinite_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF8000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF8000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 0x7FF8000000000000
 ;
 entry:
@@ -191,7 +191,7 @@ entry:
 define double @nonfinite_04() #0 {
 ; CHECK-LABEL: @nonfinite_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF0000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF0000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 0x7FF0000000000000
 ;
 entry:
@@ -203,7 +203,7 @@ entry:
 define double @rint_01() #0 {
 ; CHECK-LABEL: @rint_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -215,7 +215,7 @@ entry:
 define double @rint_02() #0 {
 ; CHECK-LABEL: @rint_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -270,7 +270,7 @@ entry:
 define double @fadd_04() #0 {
 ; CHECK-LABEL: @fadd_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -282,7 +282,7 @@ entry:
 define double @fadd_05() #0 {
 ; CHECK-LABEL: @fadd_05(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 3.000000e+00
 ;
 entry:
@@ -294,7 +294,7 @@ entry:
 define double @fadd_06() #0 {
 ; CHECK-LABEL: @fadd_06(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 3.000000e+00
 ;
 entry:
@@ -306,7 +306,7 @@ entry:
 define double @fadd_07() #0 {
 ; CHECK-LABEL: @fadd_07(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -328,7 +328,7 @@ entry:
 define double @fadd_09() #0 {
 ; CHECK-LABEL: @fadd_09(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -492,7 +492,7 @@ entry:
 define i1 @cmp_eq_nan_01() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -503,7 +503,7 @@ entry:
 define i1 @cmp_eq_nan_02() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -515,7 +515,7 @@ entry:
 define i1 @cmp_eq_nan_03() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 false
 ;
 entry:
@@ -526,7 +526,7 @@ entry:
 define i1 @cmp_eq_nan_04() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -540,7 +540,6 @@ attributes #0 = { strictfp }
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
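
All of the test updates in this patch follow one pattern: a call to a
constrained intrinsic that encodes the FP environment in trailing metadata
arguments becomes a call carrying "fpe.control" and "fpe.except" operand
bundles. Below is a minimal self-contained sketch in LLVM IR of both shapes,
using only names that appear in the tests above; the rounding-mode strings
map as !"round.tonearest" -> !"rte", !"round.towardzero" -> !"rtz" and
!"round.dynamic" -> !"dyn", as in the constfold-constrained.ll checks.

define double @bundle_sketch(double %x) strictfp {
entry:
  ; Old form, removed by this series:
  ;   %r = call double @llvm.experimental.constrained.trunc.f64(
  ;            double %x, metadata !"fpexcept.strict") #0
  ; New form: the exception behavior travels in an operand bundle.
  %r = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
  ; Constrained intrinsics keep their metadata arguments for now but gain
  ; the equivalent bundles, which are what getRoundingMode and
  ; getExceptionBehavior now consult.
  %r2 = call double @llvm.experimental.constrained.nearbyint.f64(double %r, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
  ret double %r2
}

declare double @llvm.trunc.f64(double)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)

attributes #0 = { strictfp }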

>From 8e64ef3ee8baf97e9ad319486a4be3aacc71c75e Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Mon, 2 Dec 2024 12:59:12 +0700
Subject: [PATCH 6/6] Fix clang-format errors

---
 llvm/lib/IR/AutoUpgrade.cpp | 67 ++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 34 deletions(-)

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 6185e4c5a8958b..912cf4ce45a4c5 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1215,7 +1215,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
         assert(Success && "cannot get intrinsic signature");
 
         NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), NewID,
-                                              OverloadTys);
+                                                  OverloadTys);
       }
       return true;
     }
@@ -4956,44 +4956,43 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
-#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
-  case Intrinsic::NAME:
+#define LEGACY_FUNCTION(NAME, A, R, I, D) case Intrinsic::NAME:
 #include "llvm/IR/ConstrainedOps.def"
-  {
-    SmallVector<OperandBundleDef, 2> Bundles;
-    unsigned NumMetadataArgs = 0;
+    {
+      SmallVector<OperandBundleDef, 2> Bundles;
+      unsigned NumMetadataArgs = 0;
+
+      if (auto RM = getRoundingModeArg(*CI)) {
+        auto CurrentRM = CI->getRoundingMode();
+        assert(!CurrentRM && "unexpected rounding bundle");
+        Builder.createFPRoundingBundle(Bundles, RM);
+        ++NumMetadataArgs;
+      }
 
-    if (auto RM = getRoundingModeArg(*CI)) {
-      auto CurrentRM = CI->getRoundingMode();
-      assert(!CurrentRM && "unexpected rounding bundle");
-      Builder.createFPRoundingBundle(Bundles, RM);
-      ++NumMetadataArgs;
-    }
+      if (auto EB = getExceptionBehaviorArg(*CI)) {
+        auto CurrentEB = CI->getExceptionBehavior();
+        assert(!CurrentEB && "unexpected exception bundle");
+        Builder.createFPExceptionBundle(Bundles, EB);
+        ++NumMetadataArgs;
+      }
 
-    if (auto EB = getExceptionBehaviorArg(*CI)) {
-      auto CurrentEB = CI->getExceptionBehavior();
-      assert(!CurrentEB && "unexpected exception bundle");
-      Builder.createFPExceptionBundle(Bundles, EB);
-      ++NumMetadataArgs;
-    }
+      SmallVector<Value *, 4> Args(CI->args());
+      Args.pop_back_n(NumMetadataArgs);
+      NewCall = Builder.CreateCall(NewFn, Args, Bundles, CI->getName());
+      NewCall->copyMetadata(*CI);
+      AttributeList Attrs = CI->getAttributes();
+      NewCall->setAttributes(Attrs);
+      if (isa<FPMathOperator>(CI)) {
+        FastMathFlags FMF = CI->getFastMathFlags();
+        NewCall->setFastMathFlags(FMF);
+      }
 
-    SmallVector<Value *, 4> Args(CI->args());
-    Args.pop_back_n(NumMetadataArgs);
-    NewCall = Builder.CreateCall(NewFn, Args, Bundles, CI->getName());
-    NewCall->copyMetadata(*CI);
-    AttributeList Attrs = CI->getAttributes();
-    NewCall->setAttributes(Attrs);
-    if (isa<FPMathOperator>(CI)) {
-      FastMathFlags FMF = CI->getFastMathFlags();
-      NewCall->setFastMathFlags(FMF);
+      MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+      auto A = Attribute::getWithMemoryEffects(CI->getContext(), ME);
+      NewCall->addFnAttr(A);
+      NewCall->addFnAttr(Attribute::StrictFP);
+      break;
     }
-
-    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
-    auto A = Attribute::getWithMemoryEffects(CI->getContext(), ME);
-    NewCall->addFnAttr(A);
-    NewCall->addFnAttr(Attribute::StrictFP);
-    break;
-  }
   }
   assert(NewCall && "Should have either set this variable or returned through "
                     "the default case");


