[llvm-branch-commits] [clang] [llvm] Reimplement constrained 'trunc' using operand bundles (PR #118253)

Serge Pavlov via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Dec 1 21:44:11 PST 2024


https://github.com/spavloff created https://github.com/llvm/llvm-project/pull/118253

Previously, the function 'trunc' in a non-default floating-point environment was implemented with a special LLVM intrinsic, 'experimental.constrained.trunc'. The introduction of floating-point operand bundles allows the interaction with the FP environment to be expressed using the same intrinsic as in the default mode.

This change removes 'llvm.experimental.constrained.trunc' and uses 'llvm.trunc' in all cases.
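For illustration, here is the shape of the change at the IR level, taken from the tests updated below; the "fpe.except" operand bundle and the 'strictfp' call attribute replace the legacy metadata argument:

    ; before: legacy constrained intrinsic, exception behavior as metadata
    %val = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict")

    ; after: plain intrinsic carrying an FP exception operand bundle
    %val = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]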

From 3da7fd198007d6c3698c025bfb96ea5fb0ccca34 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 25 Sep 2024 16:07:02 +0700
Subject: [PATCH] Reimplement constrained 'trunc' using operand bundles

Previously, the function 'trunc' in a non-default floating-point
environment was implemented with a special LLVM intrinsic,
'experimental.constrained.trunc'. The introduction of floating-point
operand bundles allows the interaction with the FP environment to be
expressed using the same intrinsic as in the default mode.

This change removes 'llvm.experimental.constrained.trunc' and uses
'llvm.trunc' in all cases.
---
 clang/lib/CodeGen/CGBuiltin.cpp               | 52 +++++++-------
 .../AArch64/neon-intrinsics-constrained.c     |  2 +-
 .../v8.2a-fp16-intrinsics-constrained.c       |  4 +-
 .../PowerPC/builtins-ppc-fpconstrained.c      |  6 +-
 .../builtins-systemz-vector-constrained.c     |  4 +-
 .../builtins-systemz-vector2-constrained.c    |  3 +-
 .../builtins-systemz-zvector-constrained.c    |  6 +-
 .../builtins-systemz-zvector2-constrained.c   | 10 +--
 clang/test/CodeGen/arm64-vrnd-constrained.c   |  4 +-
 .../test/CodeGen/constrained-math-builtins.c  | 19 ++---
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h |  1 +
 llvm/include/llvm/CodeGen/TargetLowering.h    |  1 +
 llvm/include/llvm/IR/ConstrainedOps.def       |  8 ++-
 llvm/include/llvm/IR/Function.h               |  2 +-
 llvm/include/llvm/IR/InstrTypes.h             |  3 +
 llvm/include/llvm/IR/IntrinsicInst.h          | 12 ++++
 llvm/include/llvm/IR/Intrinsics.h             |  7 +-
 llvm/include/llvm/IR/Intrinsics.td            |  3 -
 llvm/lib/Analysis/ConstantFolding.cpp         | 13 ++--
 llvm/lib/AsmParser/LLParser.cpp               |  9 +++
 llvm/lib/CodeGen/ExpandVectorPredication.cpp  |  2 +-
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  6 ++
 .../SelectionDAG/LegalizeVectorOps.cpp        |  2 +
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  3 +
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  1 +
 .../SelectionDAG/SelectionDAGBuilder.cpp      | 19 ++++-
 .../SelectionDAG/SelectionDAGBuilder.h        |  2 +-
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  3 +-
 llvm/lib/IR/AutoUpgrade.cpp                   | 72 +++++++++++++++++--
 llvm/lib/IR/Function.cpp                      |  4 +-
 llvm/lib/IR/Instructions.cpp                  |  5 ++
 llvm/lib/IR/IntrinsicInst.cpp                 | 32 ++++++++-
 llvm/lib/IR/Intrinsics.cpp                    |  2 +-
 llvm/lib/Transforms/Utils/Local.cpp           |  7 +-
 llvm/test/Assembler/fp-intrinsics-attr.ll     | 12 ++--
 llvm/test/Bitcode/auto-upgrade-constrained.ll |  2 +-
 .../CodeGen/AArch64/fp-intrinsics-fp16.ll     |  3 +-
 .../CodeGen/AArch64/fp-intrinsics-vector.ll   |  9 +--
 llvm/test/CodeGen/AArch64/fp-intrinsics.ll    |  9 +--
 llvm/test/CodeGen/ARM/fp-intrinsics.ll        |  4 +-
 llvm/test/CodeGen/PowerPC/fp-strict-round.ll  | 21 ++----
 .../ppcf128-constrained-fp-intrinsics.ll      |  5 +-
 .../vector-constrained-fp-intrinsics.ll       | 21 ++----
 .../CodeGen/RISCV/double-intrinsics-strict.ll |  4 +-
 .../CodeGen/RISCV/float-intrinsics-strict.ll  |  4 +-
 ...fixed-vectors-ftrunc-constrained-sdnode.ll | 45 ++++--------
 .../RISCV/rvv/ftrunc-constrained-sdnode.ll    | 45 ++++--------
 .../RISCV/zfh-half-intrinsics-strict.ll       |  4 +-
 .../RISCV/zfhmin-half-intrinsics-strict.ll    |  4 +-
 .../CodeGen/SystemZ/fp-strict-round-01.ll     | 15 +---
 .../CodeGen/SystemZ/fp-strict-round-02.ll     | 15 +---
 .../CodeGen/SystemZ/fp-strict-round-03.ll     | 15 +---
 .../CodeGen/SystemZ/vec-strict-round-01.ll    | 10 +--
 .../CodeGen/SystemZ/vec-strict-round-02.ll    | 10 +--
 .../vector-constrained-fp-intrinsics.ll       | 21 ++----
 .../X86/fp-strict-scalar-round-fp16.ll        |  6 +-
 .../CodeGen/X86/fp-strict-scalar-round.ll     |  8 +--
 .../test/CodeGen/X86/fp128-libcalls-strict.ll |  3 +-
 llvm/test/CodeGen/X86/fp80-strict-libcalls.ll |  3 +-
 llvm/test/CodeGen/X86/vec-strict-256-fp16.ll  |  4 +-
 llvm/test/CodeGen/X86/vec-strict-256.ll       |  8 +--
 llvm/test/CodeGen/X86/vec-strict-512-fp16.ll  |  3 +-
 llvm/test/CodeGen/X86/vec-strict-512.ll       |  6 +-
 llvm/test/CodeGen/X86/vec-strict-round-128.ll |  8 +--
 .../X86/vector-constrained-fp-intrinsics.ll   | 21 ++----
 .../InstSimplify/constfold-constrained.ll     | 49 +++++++------
 66 files changed, 363 insertions(+), 363 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index cb9c23b8e0a0d0..52b2d3320c60ea 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -657,6 +657,17 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   }
 }
 
+// Emit a simple mangled intrinsic that has 1 argument and a return type
+// matching the argument type.
+static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                 unsigned IntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+  return CGF.Builder.CreateCall(F, Src0);
+}
+
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -3238,9 +3249,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     case Builtin::BI__builtin_truncf16:
     case Builtin::BI__builtin_truncl:
     case Builtin::BI__builtin_truncf128:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::trunc,
-                                   Intrinsic::experimental_constrained_trunc));
+      return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc));
 
     case Builtin::BIlround:
     case Builtin::BIlroundf:
@@ -6827,7 +6836,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
   unsigned j = 0;
   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
        ai != ae; ++ai, ++j) {
-    if (F->isConstrainedFPIntrinsic())
+    if (F->isLegacyConstrainedIntrinsic())
       if (ai->getType()->isMetadataTy())
         continue;
     if (shift > 0 && shift == j)
@@ -6836,7 +6845,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
       Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
   }
 
-  if (F->isConstrainedFPIntrinsic())
+  if (F->isLegacyConstrainedIntrinsic())
     return Builder.CreateConstrainedFPCall(F, Ops, name);
   else
     return Builder.CreateCall(F, Ops, name);
@@ -12989,13 +12998,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
               : Intrinsic::rint;
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
   }
-  case NEON::BI__builtin_neon_vrndh_f16: {
+  case NEON::BI__builtin_neon_vrndh_f16:
     Ops.push_back(EmitScalarExpr(E->getArg(0)));
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
-  }
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, HalfTy), Ops,
+                        "vrndz");
+
   case NEON::BI__builtin_neon_vrnd32x_f32:
   case NEON::BI__builtin_neon_vrnd32xq_f32:
   case NEON::BI__builtin_neon_vrnd32x_f64:
@@ -13029,12 +13036,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
   }
   case NEON::BI__builtin_neon_vrnd_v:
-  case NEON::BI__builtin_neon_vrndq_v: {
-    Int = Builder.getIsFPConstrained()
-              ? Intrinsic::experimental_constrained_trunc
-              : Intrinsic::trunc;
-    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
-  }
+  case NEON::BI__builtin_neon_vrndq_v:
+    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::trunc, Ty), Ops, "vrndz");
+
   case NEON::BI__builtin_neon_vcvt_f64_v:
   case NEON::BI__builtin_neon_vcvtq_f64_v:
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -18251,9 +18255,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                : Intrinsic::ceil;
     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
              BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
-      ID = Builder.getIsFPConstrained()
-               ? Intrinsic::experimental_constrained_trunc
-               : Intrinsic::trunc;
+      return emitUnaryFPBuiltin(*this, E, Intrinsic::trunc);
+
     llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
     return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
                                         : Builder.CreateCall(F, X);
@@ -18754,9 +18757,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
         .getScalarVal();
   case PPC::BI__builtin_ppc_friz:
   case PPC::BI__builtin_ppc_frizs:
-    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
-                           *this, E, Intrinsic::trunc,
-                           Intrinsic::experimental_constrained_trunc))
+    return RValue::get(emitUnaryFPBuiltin(*this, E, Intrinsic::trunc))
         .getScalarVal();
   case PPC::BI__builtin_ppc_fsqrt:
   case PPC::BI__builtin_ppc_fsqrts:
@@ -20536,8 +20537,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
               CI = Intrinsic::experimental_constrained_nearbyint; break;
       case 1: ID = Intrinsic::round;
               CI = Intrinsic::experimental_constrained_round; break;
-      case 5: ID = Intrinsic::trunc;
-              CI = Intrinsic::experimental_constrained_trunc; break;
+      case 5: ID = Intrinsic::trunc; break;
       case 6: ID = Intrinsic::ceil;
               CI = Intrinsic::experimental_constrained_ceil; break;
       case 7: ID = Intrinsic::floor;
@@ -20546,7 +20546,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
       break;
     }
     if (ID != Intrinsic::not_intrinsic) {
-      if (Builder.getIsFPConstrained()) {
+      if (Builder.getIsFPConstrained() && ID != Intrinsic::trunc) {
         Function *F = CGM.getIntrinsic(CI, ResultType);
         return Builder.CreateConstrainedFPCall(F, X);
       } else {
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
index 15ae7eea820e80..0405cf7f19c73b 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics-constrained.c
@@ -792,7 +792,7 @@ float64x1_t test_vrndx_f64(float64x1_t a) {
 // COMMON-LABEL: test_vrnd_f64
 // COMMONIR:      [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
 // UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
-// CONSTRAINED:   [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
+// CONSTRAINED:   [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR:      ret <1 x double> [[VRNDZ1_I]]
 float64x1_t test_vrnd_f64(float64x1_t a) {
   return vrnd_f64(a);
diff --git a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
index 9109626cea9ca2..9079a6690b9db3 100644
--- a/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
+++ b/clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics-constrained.c
@@ -150,7 +150,7 @@ uint64_t test_vcvth_u64_f16 (float16_t a) {
 
 // COMMON-LABEL: test_vrndh_f16
 // UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
-// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
+// CONSTRAINED:    [[RND:%.*]] = call half @llvm.trunc.f16(half %a) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // COMMONIR:       ret half [[RND]]
 float16_t test_vrndh_f16(float16_t a) {
   return vrndh_f16(a);
@@ -298,3 +298,5 @@ float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
   return vfmsh_f16(a, b, c);
 }
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
index 838db02415fe5b..b326f131a56e54 100644
--- a/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
+++ b/clang/test/CodeGen/PowerPC/builtins-ppc-fpconstrained.c
@@ -85,13 +85,13 @@ void test_float(void) {
   vf = __builtin_vsx_xvrspiz(vf);
   // CHECK-LABEL: try-xvrspiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrspiz
 
   vd = __builtin_vsx_xvrdpiz(vd);
   // CHECK-LABEL: try-xvrdpiz
   // CHECK-UNCONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}})
-  // CHECK-CONSTRAINED: @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !"fpexcept.strict")
+  // CHECK-CONSTRAINED: @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: xvrdpiz
 
   vf = __builtin_vsx_xvmaddasp(vf, vf, vf);
@@ -156,3 +156,5 @@ void test_float(void) {
   // CHECK-CONSTRAINED: fneg <2 x double> [[RESULT1]]
   // CHECK-ASM: xvnmsubadp
 }
+
+// CHECK-CONSTRAINED: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
index 6d2845504a39f0..77ede2c10eea08 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector-constrained.c
@@ -45,7 +45,7 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 1);
   // CHECK: call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 5);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vd = __builtin_s390_vfidb(vd, 4, 6);
   // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}})
   vd = __builtin_s390_vfidb(vd, 4, 7);
@@ -53,3 +53,5 @@ void test_float(void) {
   vd = __builtin_s390_vfidb(vd, 4, 4);
   // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4)
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
index 735b6a0249ab62..7488cf90a9669d 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-vector2-constrained.c
@@ -60,10 +60,11 @@ void test_float(void) {
   vf = __builtin_s390_vfisb(vf, 4, 1);
   // CHECK: call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 5);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   vf = __builtin_s390_vfisb(vf, 4, 6);
   // CHECK: call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
   vf = __builtin_s390_vfisb(vf, 4, 7);
   // CHECK: call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
 }
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
index 6a1f8f0e923f65..fe964fa38aee07 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector-constrained.c
@@ -303,10 +303,10 @@ void test_float(void) {
   // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundc(vd);
   // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
@@ -316,3 +316,5 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
   vd = vec_round(vd);
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
index 750f5011a26798..e7ea4e325862e9 100644
--- a/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
+++ b/clang/test/CodeGen/SystemZ/builtins-systemz-zvector2-constrained.c
@@ -495,16 +495,16 @@ void test_float(void) {
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
 
   vf = vec_roundz(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vf = vec_trunc(vf);
-  // CHECK: call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfisb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_roundz(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
   vd = vec_trunc(vd);
-  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
   // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
 
   vf = vec_roundc(vf);
@@ -541,3 +541,5 @@ void test_float(void) {
   // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095)
   // CHECK-ASM: vftcidb
 }
+
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
\ No newline at end of file
diff --git a/clang/test/CodeGen/arm64-vrnd-constrained.c b/clang/test/CodeGen/arm64-vrnd-constrained.c
index ccf729a6a25ef6..e690f26b0def52 100644
--- a/clang/test/CodeGen/arm64-vrnd-constrained.c
+++ b/clang/test/CodeGen/arm64-vrnd-constrained.c
@@ -14,7 +14,7 @@
 float64x2_t rnd5(float64x2_t a) { return vrndq_f64(a); }
 // COMMON-LABEL: rnd5
 // UNCONSTRAINED: call <2 x double> @llvm.trunc.v2f64(<2 x double>
-// CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>
+// CONSTRAINED:   call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) #[[ATTR:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 // CHECK-ASM:     frintz.2d v{{[0-9]+}}, v{{[0-9]+}}
 
 float64x2_t rnd13(float64x2_t a) { return vrndmq_f64(a); }
@@ -41,3 +41,5 @@ float64x2_t rnd25(float64x2_t a) { return vrndxq_f64(a); }
 // CONSTRAINED:   call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>
 // CHECK-ASM:     frintx.2d v{{[0-9]+}}, v{{[0-9]+}}
 
+// CHECK: attributes #[[ATTR]] = { strictfp memory(inaccessiblemem: readwrite) }
+
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index 68b9e75283c547..f5136cd18e0eff 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -242,10 +242,10 @@ __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f);
 
   __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);
 
-// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call double @llvm.trunc.f64(double %{{.*}}) #[[ATTR_CALL:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call float @llvm.trunc.f32(float %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call x86_fp80 @llvm.trunc.f80(x86_fp80 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
+// CHECK: call fp128 @llvm.trunc.f128(fp128 %{{.*}}) #[[ATTR_CALL]] [ "fpe.except"(metadata !"strict") ]
 };
 
 // CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
@@ -377,10 +377,10 @@ __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f);
 // CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
 // CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
 
-// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
-// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
+// CHECK: declare double @llvm.trunc.f64(double) #[[ATTR_FUNC:[0-9]+]]
+// CHECK: declare float @llvm.trunc.f32(float) #[[ATTR_FUNC]]
+// CHECK: declare x86_fp80 @llvm.trunc.f80(x86_fp80) #[[ATTR_FUNC]]
+// CHECK: declare fp128 @llvm.trunc.f128(fp128) #[[ATTR_FUNC]]
 
 #pragma STDC FP_CONTRACT ON
 void bar(float f) {
@@ -401,3 +401,6 @@ void bar(float f) {
   // CHECK: fneg
   // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
 };
+
+// CHECK: attributes #[[ATTR_FUNC]] = { {{.*}} memory(none) }
+// CHECK: attributes #[[ATTR_CALL]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 677b59e0c8fbeb..9dc831ef23273d 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -721,6 +721,7 @@ END_TWO_BYTE_PACK()
       case ISD::STRICT_FP_TO_BF16:
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
         return true;
     }
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 6a41094ff933b0..7ccaf9558077c0 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1324,6 +1324,7 @@ class TargetLoweringBase {
       default: llvm_unreachable("Unexpected FP pseudo-opcode");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
       case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/include/llvm/IR/ConstrainedOps.def b/llvm/include/llvm/IR/ConstrainedOps.def
index 30a82bf633d575..2b1bb711444a06 100644
--- a/llvm/include/llvm/IR/ConstrainedOps.def
+++ b/llvm/include/llvm/IR/ConstrainedOps.def
@@ -39,6 +39,11 @@
 #define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
 #endif
 
+// Intrinsic functions that had a constrained variant.
+#ifndef LEGACY_FUNCTION
+#define LEGACY_FUNCTION(N,A,R,I,D)
+#endif
+
 // Arguments of the entries are:
 // - instruction or intrinsic function name.
 // - Number of original instruction/intrinsic arguments.
@@ -103,7 +108,7 @@ DAG_FUNCTION(sinh,            1, 1, experimental_constrained_sinh,       FSINH)
 DAG_FUNCTION(sqrt,            1, 1, experimental_constrained_sqrt,       FSQRT)
 DAG_FUNCTION(tan,             1, 1, experimental_constrained_tan,        FTAN)
 DAG_FUNCTION(tanh,            1, 1, experimental_constrained_tanh,       FTANH)
-DAG_FUNCTION(trunc,           1, 0, experimental_constrained_trunc,      FTRUNC)
+LEGACY_FUNCTION(trunc,        1, 0, experimental_constrained_trunc,      FTRUNC)
 
 // This is definition for fmuladd intrinsic function, that is converted into
 // constrained FMA or FMUL + FADD intrinsics.
@@ -114,3 +119,4 @@ FUNCTION(fmuladd,         3, 1, experimental_constrained_fmuladd)
 #undef CMP_INSTRUCTION
 #undef DAG_INSTRUCTION
 #undef DAG_FUNCTION
+#undef LEGACY_FUNCTION
\ No newline at end of file
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
index e7afcbd31420c1..076a28519491ff 100644
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -263,7 +263,7 @@ class LLVM_ABI Function : public GlobalObject, public ilist_node<Function> {
   /// Returns true if the function is one of the "Constrained Floating-Point
   /// Intrinsics". Returns false if not, and returns false when
   /// getIntrinsicID() returns Intrinsic::not_intrinsic.
-  bool isConstrainedFPIntrinsic() const;
+  bool isLegacyConstrainedIntrinsic() const;
 
   /// Update internal caches that depend on the function name (such as the
   /// intrinsic ID and libcall cache).
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 2cc6c0359bf7ad..aaa07215028e19 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -2145,6 +2145,9 @@ class CallBase : public Instruction {
   /// Return exception behavior specified by operand bundles.
   std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
 
+  /// Does the called function access the floating-point environment?
+  bool isConstrained() const;
+
   /// Used to keep track of an operand bundle.  See the main comment on
   /// OperandBundleUser above.
   struct BundleOpInfo {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index a248a9612a82d0..2f382ed6b0ad4d 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -146,6 +146,18 @@ class IntrinsicInst : public CallInst {
 std::optional<RoundingMode> getRoundingModeArg(const CallBase &I);
 std::optional<fp::ExceptionBehavior> getExceptionBehaviorArg(const CallBase &I);
 
+/// Return true if the argument specifies an intrinsic that had a constrained
+/// variant (like 'trunc.f32').
+bool hadConstrainedVariant(StringRef Name);
+
+/// If the given string specifies some legacy constrained intrinsic (like
+/// 'llvm.experimental.constrained.trunc.f32'), return the corresponding
+/// intrinsic ID (like 'Intrinsic::trunc') and the number of FP metadata arguments.
+///
+/// \param Name Intrinsic name without prefix 'llvm.experimental.constrained'
+///             (like 'trunc.f32').
+std::pair<Intrinsic::ID, unsigned> getIntrinsicForConstrained(StringRef Name);
+
 /// Check if \p ID corresponds to a lifetime intrinsic.
 static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
   switch (ID) {
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index 89dfff256e0c43..c867a944ccc9b1 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -125,9 +125,10 @@ namespace Intrinsic {
   /// Map a MS builtin name to an intrinsic ID.
   ID getIntrinsicForMSBuiltin(StringRef TargetPrefix, StringRef BuiltinName);
 
-  /// Returns true if the intrinsic ID is for one of the "Constrained
-  /// Floating-Point Intrinsics".
-  bool isConstrainedFPIntrinsic(ID QID);
+  /// Returns true if the intrinsic ID is for one of the legacy constrained
+  /// floating-point intrinsics, which use metadata arguments to represent
+  /// floating-point options.
+  bool isLegacyConstrainedIntrinsic(ID QID);
 
   /// Returns true if the intrinsic ID is for one of the "Constrained
   /// Floating-Point Intrinsics" that take rounding mode metadata.
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 1ca8c2565ab0b6..8d192b0d5cfe00 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1352,9 +1352,6 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in
   def int_experimental_constrained_roundeven : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                          [ LLVMMatchType<0>,
                                                            llvm_metadata_ty ]>;
-  def int_experimental_constrained_trunc : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
-                                                     [ LLVMMatchType<0>,
-                                                       llvm_metadata_ty ]>;
 
   // Constrained floating-point comparison (quiet and signaling variants).
   // Third operand is the predicate represented as a metadata string.
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 1971c28fc4c4de..6bb86048694acf 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1706,7 +1706,6 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
   case Intrinsic::experimental_constrained_roundeven:
-  case Intrinsic::experimental_constrained_trunc:
   case Intrinsic::experimental_constrained_nearbyint:
   case Intrinsic::experimental_constrained_rint:
   case Intrinsic::experimental_constrained_fcmp:
@@ -2142,8 +2141,11 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
   }
 
   if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
+    auto EB = Call->getExceptionBehavior();
+    APFloat U = Op->getValueAPF();
+
     if (IntrinsicID == Intrinsic::convert_to_fp16) {
-      APFloat Val(Op->getValueAPF());
+      APFloat Val(U);
 
       bool lost = false;
       Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
@@ -2151,8 +2153,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
       return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
     }
 
-    APFloat U = Op->getValueAPF();
-
     if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
         IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
       bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
@@ -2231,6 +2231,8 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     }
 
     if (IntrinsicID == Intrinsic::trunc) {
+      if (U.isSignaling() && EB && *EB != fp::ebIgnore)
+        return nullptr;
       U.roundToIntegral(APFloat::rmTowardZero);
       return ConstantFP::get(Ty->getContext(), U);
     }
@@ -2277,9 +2279,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
     case Intrinsic::experimental_constrained_floor:
       RM = APFloat::rmTowardNegative;
       break;
-    case Intrinsic::experimental_constrained_trunc:
-      RM = APFloat::rmTowardZero;
-      break;
     }
     if (RM) {
       auto CI = cast<ConstrainedFPIntrinsic>(Call);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 65ef6c8b291165..5367e922b0082e 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6342,10 +6342,12 @@ void LLParser::updateConstrainedIntrinsic(
     return StringRef();
   };
 
+  unsigned NumMetadataArgs = 0;
   if (Args.size() > 1) {
     Value *V = Args[Args.size() - 2].V;
     StringRef VStr = getMetadataArgumentValue(V);
     if (!VStr.empty()) {
+      NumMetadataArgs++;
       if (auto RM = convertStrToRoundingMode(VStr))
         addFPRoundingBundle(Context, Bundles, *RM);
     }
@@ -6354,10 +6356,16 @@
   Value *V = Args.back().V;
   StringRef VStr = getMetadataArgumentValue(V);
   if (!VStr.empty()) {
+    NumMetadataArgs++;
     if (auto EB = convertStrToExceptionBehavior(VStr))
       addFPExceptionBundle(Context, Bundles, *EB);
   }
 
+  if (hadConstrainedVariant(Name)) {
+    Args.pop_back_n(NumMetadataArgs);
+    CalleeID.StrVal = "llvm." + Name.str();
+  }
+
   MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
   FnAttrs.addAttribute(Attribute::getWithMemoryEffects(Context, ME));
   FnAttrs.addAttribute(Attribute::StrictFP);
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 5ca223852cbde3..5c81c61f1ab278 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -331,7 +331,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
     Function *Fn = Intrinsic::getOrInsertDeclaration(
         VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
     Value *NewOp;
-    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
+    if (Intrinsic::isLegacyConstrainedIntrinsic(UnpredicatedIntrinsicID))
       NewOp =
           Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
     else
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index f668e41094bbc8..c4e6042d2a791a 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2155,6 +2155,12 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     }
   }
 
+  // Process constrained intrinsics in a way compatible with the pre-bundle
+  // implementation.
+  if (CI.isConstrained() &&
+      !Intrinsic::isLegacyConstrainedIntrinsic(CI.getIntrinsicID()))
+    return false;
+
   // If this is a simple intrinsic (that is, we just need to add a def of
   // a vreg, and uses for each arg operand, then translate it.
   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index db21e708970648..b984e6dc491f3d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -314,6 +314,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ValVT = Node->getValueType(0);
     if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
@@ -1151,6 +1152,7 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
     break;
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     ExpandStrictFPOp(Node, Results);
     return;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 465128099f4447..1ff3dc2bcdb8ad 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -199,6 +199,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     R = ScalarizeVecRes_StrictFPOp(N);
     break;
@@ -1337,6 +1338,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     SplitVecRes_StrictFPOp(N, Lo, Hi);
     break;
@@ -4639,6 +4641,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
     Res = WidenVecRes_StrictFP(N);
     break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 182529123ec6d8..f7a9b351b43f55 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -11020,6 +11020,7 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
 #include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index a38a3e9b91052d..15c801a74dbc89 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6462,6 +6462,11 @@ void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
 /// Lower the call to the specified intrinsic function.
 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                              unsigned Intrinsic) {
+  if (I.isConstrained()) {
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
+    return;
+  }
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDLoc sdl = getCurSDLoc();
   DebugLoc dl = getCurDebugLoc();
@@ -7022,7 +7027,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
 #include "llvm/IR/ConstrainedOps.def"
-    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
+    visitConstrainedFPIntrinsic(cast<IntrinsicInst>(I));
     return;
 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
 #include "llvm/IR/VPIntrinsics.def"
@@ -8290,7 +8295,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
 }
 
 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
-    const ConstrainedFPIntrinsic &FPI) {
+    const IntrinsicInst &FPI) {
   SDLoc sdl = getCurSDLoc();
 
   // We do not need to serialize constrained FP intrinsics against
@@ -8299,7 +8304,13 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   SDValue Chain = DAG.getRoot();
   SmallVector<SDValue, 4> Opers;
   Opers.push_back(Chain);
-  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
+
+  Intrinsic::ID ID = FPI.getIntrinsicID();
+  bool IsLegacy = Intrinsic::isLegacyConstrainedIntrinsic(ID);
+  unsigned NumArgs = IsLegacy ? static_cast<const ConstrainedFPIntrinsic &>(FPI)
+                                    .getNonMetadataArgCount()
+                              : FPI.arg_size();
+  for (unsigned I = 0; I != NumArgs; ++I)
     Opers.push_back(getValue(FPI.getArgOperand(I)));
 
   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
@@ -8347,6 +8358,8 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
   case Intrinsic::INTRINSIC:                                                   \
     Opcode = ISD::STRICT_##DAGN;                                               \
     break;
+#define LEGACY_FUNCTION(NAME, NARG, ROUND_MODE, I, DAGN)                       \
+  DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, NAME, DAGN)
 #include "llvm/IR/ConstrainedOps.def"
   case Intrinsic::experimental_constrained_fmuladd: {
     Opcode = ISD::STRICT_FMA;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 3a8dc25e98700e..8c0b8a667357c1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -626,7 +626,7 @@ class SelectionDAGBuilder {
                                DebugLoc DbgLoc);
   void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
   void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
-  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
+  void visitConstrainedFPIntrinsic(const IntrinsicInst &FPI);
   void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
   void visitVectorHistogram(const CallInst &I, unsigned IntrinsicID);
   void visitVectorExtractLastActive(const CallInst &I, unsigned Intrinsic);
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 392cfbdd21273d..4275318a7e0b13 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -791,7 +791,8 @@ void TargetLoweringBase::initActions() {
 
       // Constrained floating-point operations default to expand.
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
-    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+  setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
+#define LEGACY_FUNCTION DAG_INSTRUCTION
 #include "llvm/IR/ConstrainedOps.def"
 
     // For most targets @llvm.get.dynamic.area.offset just returns 0.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 3e30fa3d10ac91..6185e4c5a8958b 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/AutoUpgrade.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/BinaryFormat/Dwarf.h"
@@ -1193,8 +1194,31 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
           F->getParent(), ID, F->getFunctionType()->getReturnType());
       return true;
     }
-    if (Name.starts_with("experimental.constrained."))
+    if (Name.consume_front("experimental.constrained.")) {
+      Name = Name.take_while(
+          [](char Ch) -> bool { return isAlnum(Ch) || Ch == '_'; });
+      auto [NewID, NumMetadataArgs] = getIntrinsicForConstrained(Name);
+      if (NewID != Intrinsic::not_intrinsic) {
+        auto *OldTy = cast<FunctionType>(F->getFunctionType());
+        SmallVector<Type *, 4> ParamTys;
+        for (unsigned i = 0, e = OldTy->getNumParams() - NumMetadataArgs;
+             i != e; ++i) {
+          ParamTys.push_back(OldTy->getParamType(i));
+        }
+        auto *NewTy =
+            FunctionType::get(OldTy->getReturnType(), ParamTys, false);
+
+        SmallVector<Type *> OverloadTys;
+        bool Success =
+            Intrinsic::getIntrinsicSignature(NewID, NewTy, OverloadTys);
+        (void)Success;
+        assert(Success && "cannot get intrinsic signature");
+
+        NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), NewID,
+                                                  OverloadTys);
+      }
       return true;
+    }
     break; // No other 'e*'.
   case 'f':
     if (Name.starts_with("flt.rounds")) {
@@ -4333,16 +4357,12 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
     return nullptr;
 
   SmallVector<OperandBundleDef, 2> NewBundles;
-
-  auto RM = getRoundingModeArg(*CB);
-  if (RM) {
+  if (auto RM = getRoundingModeArg(*CB)) {
     auto CurrentRM = CB->getRoundingMode();
     assert(!CurrentRM && "unexpected rounding bundle");
     Builder.createFPRoundingBundle(NewBundles, RM);
   }
-
-  auto EB = getExceptionBehaviorArg(*CB);
-  if (EB) {
+  if (auto EB = getExceptionBehaviorArg(*CB)) {
     auto CurrentEB = CB->getExceptionBehavior();
     assert(!CurrentEB && "unexpected exception bundle");
     Builder.createFPExceptionBundle(NewBundles, EB);
@@ -4936,6 +4956,44 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  case Intrinsic::NAME:
+#include "llvm/IR/ConstrainedOps.def"
+  {
+    SmallVector<OperandBundleDef, 2> Bundles;
+    unsigned NumMetadataArgs = 0;
+
+    if (auto RM = getRoundingModeArg(*CI)) {
+      auto CurrentRM = CI->getRoundingMode();
+      assert(!CurrentRM && "unexpected rounding bundle");
+      Builder.createFPRoundingBundle(Bundles, RM);
+      ++NumMetadataArgs;
+    }
+
+    if (auto EB = getExceptionBehaviorArg(*CI)) {
+      auto CurrentEB = CI->getExceptionBehavior();
+      assert(!CurrentEB && "unexpected exception bundle");
+      Builder.createFPExceptionBundle(Bundles, EB);
+      ++NumMetadataArgs;
+    }
+
+    SmallVector<Value *, 4> Args(CI->args());
+    Args.pop_back_n(NumMetadataArgs);
+    NewCall = Builder.CreateCall(NewFn, Args, Bundles, CI->getName());
+    NewCall->copyMetadata(*CI);
+    AttributeList Attrs = CI->getAttributes();
+    NewCall->setAttributes(Attrs);
+    if (isa<FPMathOperator>(CI)) {
+      FastMathFlags FMF = CI->getFastMathFlags();
+      NewCall->setFastMathFlags(FMF);
+    }
+
+    MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+    auto A = Attribute::getWithMemoryEffects(CI->getContext(), ME);
+    NewCall->addFnAttr(A);
+    NewCall->addFnAttr(Attribute::StrictFP);
+    break;
+  }
   }
   assert(NewCall && "Should have either set this variable or returned through "
                     "the default case");
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index 9c5dd5aeb92e97..d6c29e27a24d6f 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -554,8 +554,8 @@ static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
   return MutableArrayRef<Argument>(Args, Count);
 }
 
-bool Function::isConstrainedFPIntrinsic() const {
-  return Intrinsic::isConstrainedFPIntrinsic(getIntrinsicID());
+bool Function::isLegacyConstrainedIntrinsic() const {
+  return Intrinsic::isLegacyConstrainedIntrinsic(getIntrinsicID());
 }
 
 void Function::clearArguments() {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index f763a29e90a97f..6d3c360fdbd59a 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -622,6 +622,11 @@ std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
   return std::nullopt;
 }
 
+bool CallBase::isConstrained() const {
+  return getOperandBundle(LLVMContext::OB_fpe_control) ||
+         getOperandBundle(LLVMContext::OB_fpe_except);
+}
+
 MemoryEffects CallBase::getMemoryEffects() const {
   MemoryEffects ME = getAttributes().getMemoryEffects();
   if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index a1f8533fe8773b..262aebc4e94c65 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -21,6 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugInfoMetadata.h"
@@ -69,6 +70,7 @@ bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
 bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
   switch (IID) {
 #define FUNCTION(NAME, A, R, I) case Intrinsic::NAME:
+#define LEGACY_FUNCTION(NAME, A, R, I, N) case Intrinsic::NAME:
 #include "llvm/IR/ConstrainedOps.def"
     return true;
   default:
@@ -78,6 +80,8 @@ bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
 
 std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
   unsigned NumOperands = I.arg_size();
+  if (NumOperands <= 2)
+    return std::nullopt;
   Metadata *MD = nullptr;
   auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 2));
   if (MAV)
@@ -90,6 +94,8 @@ std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
 std::optional<fp::ExceptionBehavior>
 llvm::getExceptionBehaviorArg(const CallBase &I) {
   unsigned NumOperands = I.arg_size();
+  if (NumOperands <= 1)
+    return std::nullopt;
   Metadata *MD = nullptr;
   auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 1));
   if (MAV)
@@ -99,6 +105,30 @@ llvm::getExceptionBehaviorArg(const CallBase &I) {
   return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
 }
 
+bool llvm::hadConstrainedVariant(StringRef Name) {
+  size_t period_pos = Name.find('.');
+  if (period_pos != StringRef::npos)
+    Name = Name.take_front(period_pos);
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  if (Name == #NAME)                                                           \
+    return true;
+#include "llvm/IR/ConstrainedOps.def"
+  return false;
+}
+
+std::pair<Intrinsic::ID, unsigned>
+llvm::getIntrinsicForConstrained(StringRef Name) {
+  size_t period_pos = Name.find('.');
+  if (period_pos != StringRef::npos)
+    Name = Name.take_front(period_pos);
+#define LEGACY_FUNCTION(NAME, A, R, I, D)                                      \
+  if (Name == #NAME)                                                           \
+    return std::make_pair(Intrinsic::NAME, 1 + R);
+#include "llvm/IR/ConstrainedOps.def"
+
+  return std::make_pair(Intrinsic::not_intrinsic, 0);
+}
+
 //===----------------------------------------------------------------------===//
 /// DbgVariableIntrinsic - This is the common base class for debug info
 /// intrinsics for variables.
@@ -364,7 +394,7 @@ unsigned ConstrainedFPIntrinsic::getNonMetadataArgCount() const {
 }
 
 bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
-  return Intrinsic::isConstrainedFPIntrinsic(I->getIntrinsicID());
+  return Intrinsic::isLegacyConstrainedIntrinsic(I->getIntrinsicID());
 }
 
 ElementCount VPIntrinsic::getStaticVectorLength() const {
diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp
index 3130a0bd2955a5..168b98de2fb66f 100644
--- a/llvm/lib/IR/Intrinsics.cpp
+++ b/llvm/lib/IR/Intrinsics.cpp
@@ -741,7 +741,7 @@ Function *Intrinsic::getDeclarationIfExists(Module *M, ID id,
 #include "llvm/IR/IntrinsicImpl.inc"
 #undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
 
-bool Intrinsic::isConstrainedFPIntrinsic(ID QID) {
+bool Intrinsic::isLegacyConstrainedIntrinsic(ID QID) {
   switch (QID) {
 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
   case Intrinsic::INTRINSIC:
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index cdc3f0308fe59c..238ce74f697805 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -514,10 +514,9 @@ bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
       return false;
     }
 
-    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
-      std::optional<fp::ExceptionBehavior> ExBehavior =
-          FPI->getExceptionBehavior();
-      return *ExBehavior != fp::ebStrict;
+    if (auto *Call = dyn_cast<CallBase>(I)) {
+      if (auto EB = Call->getExceptionBehavior())
+        return *EB != fp::ebStrict;
     }
   }
 
diff --git a/llvm/test/Assembler/fp-intrinsics-attr.ll b/llvm/test/Assembler/fp-intrinsics-attr.ll
index 5b9a44710763e4..176c900465c3c9 100644
--- a/llvm/test/Assembler/fp-intrinsics-attr.ll
+++ b/llvm/test/Assembler/fp-intrinsics-attr.ll
@@ -215,9 +215,7 @@ define void @func(double %a, double %b, double %c, i32 %i) strictfp {
                                                double %a,
                                                metadata !"fpexcept.strict")
 
-  %trunc = call double @llvm.experimental.constrained.trunc.f64(
-                                               double %a,
-                                               metadata !"fpexcept.strict")
+  %trunc = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]
 
   %q1 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                                double %a, double %b,
@@ -368,15 +366,15 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
 ; CHECK: @llvm.experimental.constrained.roundeven.f64({{.*}}) #[[ATTR1]]
 
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-; CHECK: @llvm.experimental.constrained.trunc.f64({{.*}}) #[[ATTR1]]
-
 declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
 ; CHECK: @llvm.experimental.constrained.fcmp.f64({{.*}}) #[[ATTR1]]
 
 declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
 ; CHECK: @llvm.experimental.constrained.fcmps.f64({{.*}}) #[[ATTR1]]
 
+declare double @llvm.trunc.f64(double)
+; CHECK: declare double @llvm.trunc.f64(double) #[[ATTR2:[0-9]+]]
+
 ; CHECK: attributes #[[ATTR0]] = {{{.*}} strictfp {{.*}}}
 ; CHECK: attributes #[[ATTR1]] = { {{.*}} strictfp {{.*}} }
-
+; CHECK: attributes #[[ATTR2]] = { {{.*}} memory(none) }
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll b/llvm/test/Bitcode/auto-upgrade-constrained.ll
index 8e3f2c4ad77896..b857fa1f4a6860 100644
--- a/llvm/test/Bitcode/auto-upgrade-constrained.ll
+++ b/llvm/test/Bitcode/auto-upgrade-constrained.ll
@@ -322,6 +322,6 @@ define float @test_trunc(float %a) strictfp {
   ret float %res
 }
 ; CHECK-LABEL: define float @test_trunc(
-; CHECK: call float @llvm.experimental.constrained.trunc.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+; CHECK: call float @llvm.trunc.f32(float {{.*}}) #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
 
 ; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
index d323a7e677b5aa..742d76638409b8 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-fp16.ll
@@ -830,7 +830,7 @@ define half @trunc_f16(half %x) #0 {
 ; CHECK-FP16:       // %bb.0:
 ; CHECK-FP16-NEXT:    frintz h0, h0
 ; CHECK-FP16-NEXT:    ret
-  %val = call half @llvm.experimental.constrained.trunc.f16(half %x, metadata !"fpexcept.strict") #0
+  %val = call half @llvm.trunc.f16(half %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret half %val
 }
 
@@ -1376,7 +1376,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f16(half, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f16(half, metadata)
 declare half @llvm.experimental.constrained.round.f16(half, metadata)
 declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 83e60c10897624..a922a39ee2da3d 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -300,7 +300,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz v0.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
+  %val = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %val
 }
 
@@ -571,7 +571,7 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz v0.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
+  %val = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %val
 }
 
@@ -829,7 +829,7 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz d0, d0
 ; CHECK-NEXT:    ret
-  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
+  %val = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x double> %val
 }
 
@@ -901,7 +901,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
 declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)
 
@@ -927,7 +926,6 @@ declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, met
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 
@@ -953,7 +951,6 @@ declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, met
 declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
 declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
 declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
-declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
 declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
 declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index f2a14a9b73fa16..539ac690719846 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -765,7 +765,7 @@ define float @trunc_f32(float %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz s0, s0
 ; CHECK-NEXT:    ret
-  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %val
 }
 
@@ -1559,7 +1559,7 @@ define double @trunc_f64(double %x) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintz d0, d0
 ; CHECK-NEXT:    ret
-  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %val
 }
 
@@ -2428,7 +2428,7 @@ define fp128 @trunc_f128(fp128 %x) #0 {
 ; CHECK-NEXT:    bl truncl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %val = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret fp128 %val
 }
 
@@ -3179,7 +3179,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
 declare float @llvm.experimental.constrained.round.f32(float, metadata)
 declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
 
@@ -3231,7 +3230,6 @@ declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
 
@@ -3280,7 +3278,6 @@ declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
 declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
 declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata)
 declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)
 
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
index 93b6a58a22b6ce..797ad8d3734eb0 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
@@ -291,7 +291,7 @@ define float @round_f32(float %x) #0 {
 ; CHECK-SP-NOV8: bl truncf
 ; CHECK-SP-V8: vrintz.f32
 define float @trunc_f32(float %x) #0 {
-  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
+  %val = call float @llvm.trunc.f32(float %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %val
 }
 
@@ -762,7 +762,7 @@ define double @round_f64(double %x) #0 {
 ; CHECK-DP-NOV8: bl trunc
 ; CHECK-DP-V8: vrintz.f64
 define double @trunc_f64(double %x) #0 {
-  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
+  %val = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %val
 }
 
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
index eac4fb6f98bf7d..379e2d7e9df9be 100644
--- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -34,11 +34,6 @@ declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
-
 define float @ceil_f32(float %f1) strictfp {
 ; P8-LABEL: ceil_f32:
 ; P8:       # %bb.0:
@@ -567,9 +562,7 @@ define float @trunc_f32(float %f1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xsrdpiz f1, f1
 ; P9-NEXT:    blr
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f1,
-                        metadata !"fpexcept.strict")
+  %res = call float @llvm.trunc.f32(float %f1) [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
@@ -583,9 +576,7 @@ define double @trunc_f64(double %f1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xsrdpiz f1, f1
 ; P9-NEXT:    blr
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f1,
-                        metadata !"fpexcept.strict")
+  %res = call double @llvm.trunc.f64(double %f1) [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
@@ -599,9 +590,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %vf1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xvrspiz v2, v2
 ; P9-NEXT:    blr
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                        <4 x float> %vf1,
-                        metadata !"fpexcept.strict")
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %vf1) [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -615,8 +604,6 @@ define <2 x double> @trunc_v2f64(<2 x double> %vf1) strictfp {
 ; P9:       # %bb.0:
 ; P9-NEXT:    xvrdpiz v2, v2
 ; P9-NEXT:    blr
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %vf1,
-                        metadata !"fpexcept.strict")
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %vf1) [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
index c1ee436a40c557..55f26d099d59f0 100644
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1061,9 +1061,7 @@ define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) #0 {
 ; PC64-NEXT:    mtlr 0
 ; PC64-NEXT:    blr
 entry:
-  %trunc = call ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(
-                    ppc_fp128 %first,
-                    metadata !"fpexcept.strict") #1
+  %trunc = call ppc_fp128 @llvm.trunc.ppcf128(ppc_fp128 %first) #1 [ "fpe.except"(metadata !"strict") ]
   ret ppc_fp128 %trunc
 }
 
@@ -2187,7 +2185,6 @@ declare ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128(ppc_fp128, metadat
 declare ppc_fp128 @llvm.experimental.constrained.fsub.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.tan.ppcf128(ppc_fp128, metadata, metadata)
 declare ppc_fp128 @llvm.experimental.constrained.atan2.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata)
-declare ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(ppc_fp128, metadata)
 declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata)
 declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata)
 declare i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128, metadata)
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
index 71c3069a406fe3..f18512347c98cc 100644
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -6767,9 +6767,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(<1 x float> %x) #0 {
 ; PC64LE9-NEXT:    xsrdpiz 1, 1
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %x,
-                               metadata !"fpexcept.strict") #1
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6784,9 +6782,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(<2 x double> %x) #0 {
 ; PC64LE9-NEXT:    xvrdpiz 34, 34
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %x,
-                                metadata !"fpexcept.strict") #1
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6834,9 +6830,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(<3 x float> %x) #0 {
 ; PC64LE9-NEXT:    xxperm 34, 35, 1
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %x,
-                              metadata !"fpexcept.strict") #1
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6857,9 +6851,7 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 {
 ; PC64LE9-NEXT:    xxswapd 1, 2
 ; PC64LE9-NEXT:    blr
 entry:
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %x,
-                          metadata !"fpexcept.strict") #1
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %x) #1 [ "fpe.except"(metadata !"strict") ]
   ret <3 x double> %trunc
 }
 
@@ -8785,7 +8777,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
@@ -8832,7 +8823,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
@@ -8901,8 +8891,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata)
@@ -8947,7 +8935,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index fddb86de58f518..6da0e5c482ed57 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -1552,8 +1552,6 @@ define double @ceil_f64(double %a) nounwind strictfp {
   ret double %1
 }
 
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
-
 define double @trunc_f64(double %a) nounwind strictfp {
 ; RV32IFD-LABEL: trunc_f64:
 ; RV32IFD:       # %bb.0:
@@ -1608,7 +1606,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.trunc.f64(double %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret double %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index 8b883f781c9d9b..63f84b55233984 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -1517,8 +1517,6 @@ define float @ceil_f32(float %a) nounwind strictfp {
   ret float %1
 }
 
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-
 define float @trunc_f32(float %a) nounwind strictfp {
 ; RV32IF-LABEL: trunc_f32:
 ; RV32IF:       # %bb.0:
@@ -1573,7 +1571,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.trunc.f32(float %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret float %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
index 2173887e854178..a5641d47e51fea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
@@ -20,10 +20,9 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half> %x, metadata !"fpexcept.strict")
+  %a = call <1 x half> @llvm.trunc.v1f16(<1 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x half> %a
 }
-declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata)
 
 define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f16:
@@ -41,10 +40,9 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half> %x, metadata !"fpexcept.strict")
+  %a = call <2 x half> @llvm.trunc.v2f16(<2 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x half> %a
 }
-declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata)
 
 define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f16:
@@ -62,10 +60,9 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half> %x, metadata !"fpexcept.strict")
+  %a = call <4 x half> @llvm.trunc.v4f16(<4 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x half> %a
 }
-declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata)
 
 define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f16:
@@ -83,10 +80,9 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half> %x, metadata !"fpexcept.strict")
+  %a = call <8 x half> @llvm.trunc.v8f16(<8 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x half> %a
 }
-declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata)
 
 define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f16:
@@ -104,10 +100,9 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half> %x, metadata !"fpexcept.strict")
+  %a = call <16 x half> @llvm.trunc.v16f16(<16 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <16 x half> %a
 }
-declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
 
 define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v32f16:
@@ -126,10 +121,9 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %x, metadata !"fpexcept.strict")
+  %a = call <32 x half> @llvm.trunc.v32f16(<32 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <32 x half> %a
 }
-declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 
 define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f32:
@@ -147,10 +141,9 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float> %x, metadata !"fpexcept.strict")
+  %a = call <1 x float> @llvm.trunc.v1f32(<1 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %a
 }
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 
 define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f32:
@@ -168,10 +161,9 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float> %x, metadata !"fpexcept.strict")
+  %a = call <2 x float> @llvm.trunc.v2f32(<2 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x float> %a
 }
-declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata)
 
 define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f32:
@@ -189,10 +181,9 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict")
+  %a = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %a
 }
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 
 define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f32:
@@ -210,10 +201,9 @@ define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float> %x, metadata !"fpexcept.strict")
+  %a = call <8 x float> @llvm.trunc.v8f32(<8 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x float> %a
 }
-declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
 
 define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f32:
@@ -231,10 +221,9 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %x, metadata !"fpexcept.strict")
+  %a = call <16 x float> @llvm.trunc.v16f32(<16 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <16 x float> %a
 }
-declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
 
 define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f64:
@@ -252,10 +241,9 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
+  %a = call <1 x double> @llvm.trunc.v1f64(<1 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <1 x double> %a
 }
-declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
 
 define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f64:
@@ -273,10 +261,9 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
+  %a = call <2 x double> @llvm.trunc.v2f64(<2 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %a
 }
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
 define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f64:
@@ -294,10 +281,9 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
+  %a = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <4 x double> %a
 }
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 
 define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f64:
@@ -315,7 +301,6 @@ define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
+  %a = call <8 x double> @llvm.trunc.v8f64(<8 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <8 x double> %a
 }
-declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
index 8a5f118d8f6acc..d1ace747e043e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
@@ -20,10 +20,9 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x half> %a
 }
-declare <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half>, metadata)
 
 define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f16:
@@ -41,10 +40,9 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x half> %a
 }
-declare <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half>, metadata)
 
 define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f16:
@@ -62,10 +60,9 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x half> %a
 }
-declare <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half>, metadata)
 
 define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f16:
@@ -83,10 +80,9 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x half> %a
 }
-declare <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half>, metadata)
 
 define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f16:
@@ -104,10 +100,9 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 16 x half> %a
 }
-declare <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half>, metadata)
 
 define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv32f16:
@@ -125,10 +120,9 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 32 x half> @llvm.trunc.nxv32f16(<vscale x 32 x half> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 32 x half> %a
 }
-declare <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half>, metadata)
 
 define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f32:
@@ -146,10 +140,9 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x float> @llvm.trunc.nxv1f32(<vscale x 1 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x float> %a
 }
-declare <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float>, metadata)
 
 define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f32:
@@ -167,10 +160,9 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x float> %a
 }
-declare <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float>, metadata)
 
 define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f32:
@@ -188,10 +180,9 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x float> %a
 }
-declare <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float>, metadata)
 
 define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f32:
@@ -209,10 +200,9 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x float> @llvm.trunc.nxv8f32(<vscale x 8 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x float> %a
 }
-declare <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float>, metadata)
 
 define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f32:
@@ -230,10 +220,9 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 16 x float> %a
 }
-declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata)
 
 define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f64:
@@ -251,10 +240,9 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 1 x double> %a
 }
-declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata)
 
 define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f64:
@@ -272,10 +260,9 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 2 x double> %a
 }
-declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata)
 
 define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f64:
@@ -293,10 +280,9 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 4 x double> %a
 }
-declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata)
 
 define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f64:
@@ -314,7 +300,6 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
+  %a = call <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double> %x) [ "fpe.except"(metadata !"strict") ]
   ret <vscale x 8 x double> %a
 }
-declare <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double>, metadata)
diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
index 3efa9e58e65d3d..f8046674754d5f 100644
--- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
@@ -204,8 +204,6 @@ define half @ceil_f16(half %a) nounwind strictfp {
   ret half %1
 }
 
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
-
 define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFH-LABEL: trunc_f16:
 ; RV32IZFH:       # %bb.0:
@@ -272,7 +270,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINX-NEXT:    addi sp, sp, 16
 ; RV64IZDINXZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret half %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
index 214ea46d3130d6..de0394a9625926 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
@@ -216,8 +216,6 @@ define half @ceil_f16(half %a) nounwind strictfp {
   ret half %1
 }
 
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
-
 define half @trunc_f16(half %a) nounwind strictfp {
 ; RV32IZFHMIN-LABEL: trunc_f16:
 ; RV32IZFHMIN:       # %bb.0:
@@ -284,7 +282,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
 ; RV64IZDINXZHINXMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZDINXZHINXMIN-NEXT:    addi sp, sp, 16
 ; RV64IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.trunc.f16(half %a) strictfp [ "fpe.except"(metadata !"strict") ]
   ret half %1
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
index 1fbb1790c01dc0..3f031745c2b61e 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
@@ -161,39 +161,30 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: brasl %r14, truncf@PLT
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: brasl %r14, trunc@PLT
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: brasl %r14, truncl@PLT
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
index bc304a3fb95fb0..8f56f552661fda 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
@@ -165,39 +165,30 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: fiebra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: fidbra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: fixbra %f0, 5, %f0, 4
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
index 2cdff7d5c425ec..df207c6b01a58b 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
@@ -169,31 +169,24 @@ define void @f12(ptr %ptr) #0 {
 }
 
 ; Test trunc for f32.
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 define float @f13(float %f) #0 {
 ; CHECK-LABEL: f13:
 ; CHECK: fiebra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
 ; Test trunc for f64.
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 define double @f14(double %f) #0 {
 ; CHECK-LABEL: f14:
 ; CHECK: fidbra %f0, 5, %f0, 4
 ; CHECK: br %r14
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
 ; Test trunc for f128.
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 define void @f15(ptr %ptr) #0 {
 ; CHECK-LABEL: f15:
 ; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
@@ -201,9 +194,7 @@ define void @f15(ptr %ptr) #0 {
 ; CHECK: vst [[RES]], 0(%r2)
 ; CHECK: br %r14
   %src = load fp128, ptr %ptr
-  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
-                        fp128 %src,
-                        metadata !"fpexcept.strict") #0
+  %res = call fp128 @llvm.trunc.f128(fp128 %src) #0 [ "fpe.except"(metadata !"strict") ]
   store fp128 %res, ptr %ptr
   ret void
 }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
index b82cb8082b7b8c..a24a2d9f791930 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
@@ -6,13 +6,11 @@ declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadat
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 
 define <2 x double> @f1(<2 x double> %val) #0 {
@@ -61,9 +59,7 @@ define <2 x double> @f5(<2 x double> %val) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK: vfidb %v24, %v24, 4, 5
 ; CHECK: br %r14
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %val,
-                        metadata !"fpexcept.strict") #0
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %val) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
 
@@ -129,9 +125,7 @@ define double @f11(<2 x double> %val) #0 {
 ; CHECK: wfidb %f0, %v24, 4, 5
 ; CHECK: br %r14
   %scalar = extractelement <2 x double> %val, i32 0
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %scalar,
-                        metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %scalar) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
index 701dd5b2302f2c..6db7d03cb82e2d 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
@@ -6,13 +6,11 @@ declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
 declare float @llvm.experimental.constrained.floor.f32(float, metadata)
 declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
 declare float @llvm.experimental.constrained.round.f32(float, metadata)
 declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 
 define <4 x float> @f1(<4 x float> %val) #0 {
@@ -61,9 +59,7 @@ define <4 x float> @f5(<4 x float> %val) #0 {
 ; CHECK-LABEL: f5:
 ; CHECK: vfisb %v24, %v24, 4, 5
 ; CHECK: br %r14
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                        <4 x float> %val,
-                        metadata !"fpexcept.strict") #0
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %val) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -128,9 +124,7 @@ define float @f11(<4 x float> %val) #0 {
 ; CHECK: wfisb %f0, %v24, 4, 5
 ; CHECK: br %r14
   %scalar = extractelement <4 x float> %val, i32 0
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %scalar,
-                        metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %scalar) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index b08f0e5a74d569..74afe4c6ae4b88 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -6071,9 +6071,7 @@ define <1 x float> @constrained_vector_trunc_v1f32(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <1 x float>, ptr %a
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %b,
-                               metadata !"fpexcept.strict") #0
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6108,9 +6106,7 @@ define <2 x double> @constrained_vector_trunc_v2f64(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <2 x double>, ptr %a
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %b,
-                                metadata !"fpexcept.strict") #0
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6163,9 +6159,7 @@ define <3 x float> @constrained_vector_trunc_v3f32(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x float>, ptr %a
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %b,
-                              metadata !"fpexcept.strict") #0
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6215,9 +6209,7 @@ define void @constrained_vector_trunc_v3f64(ptr %a) #0 {
 ; SZ13-NEXT:    br %r14
 entry:
   %b = load <3 x double>, ptr %a
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %b,
-                          metadata !"fpexcept.strict") #0
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   store <3 x double> %trunc, ptr %a
   ret void
 }
@@ -6953,7 +6945,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
 declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.fsub.v1f32(<1 x float>, <1 x float>, metadata, metadata)
@@ -6981,7 +6972,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 
 declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata)
@@ -7033,8 +7023,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 
 declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
@@ -7062,4 +7050,3 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
index 3b9798a2af5820..87aab3f9ad9c56 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round-fp16.ll
@@ -7,7 +7,6 @@
 
 declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
 declare half @llvm.experimental.constrained.floor.f16(half, metadata)
-declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
 declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
 declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
@@ -122,8 +121,7 @@ define half @ftrunc32(half %f) #0 {
 ; X64:       # %bb.0:
 ; X64-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
-  %res = call half @llvm.experimental.constrained.trunc.f16(
-                        half %f, metadata !"fpexcept.strict") #0
+  %res = call half @llvm.trunc.f16(half %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret half %res
 }
 
@@ -273,7 +271,7 @@ define half @fround16(half %f) #0 {
 ; X86-LABEL: fround16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)
 ; X86-NEXT:    calll roundf
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
index 13f890ae6e191a..7235f0a95ef345 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
@@ -10,8 +10,6 @@ declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
 declare float @llvm.experimental.constrained.floor.f32(float, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
-declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
@@ -245,8 +243,7 @@ define float @ftrunc32(float %f) #0 {
 ; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
-  %res = call float @llvm.experimental.constrained.trunc.f32(
-                        float %f, metadata !"fpexcept.strict") #0
+  %res = call float @llvm.trunc.f32(float %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret float %res
 }
 
@@ -298,8 +295,7 @@ define double @ftruncf64(double %f) #0 {
 ; AVX-X64:       # %bb.0:
 ; AVX-X64-NEXT:    vroundsd $11, %xmm0, %xmm0, %xmm0
 ; AVX-X64-NEXT:    retq
-  %res = call double @llvm.experimental.constrained.trunc.f64(
-                        double %f, metadata !"fpexcept.strict") #0
+  %res = call double @llvm.trunc.f64(double %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret double %res
 }
 
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 5263e0d4f6f39f..0705ee01aa380a 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -1407,7 +1407,7 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl $4
 entry:
-  %trunc = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
+  %trunc = call fp128 @llvm.trunc.f128(fp128 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret fp128 %trunc
 }
 
@@ -1993,7 +1993,6 @@ declare fp128 @llvm.experimental.constrained.atan.f128(fp128, metadata, metadata
 declare fp128 @llvm.experimental.constrained.atan2.f128(fp128, fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.tanh.f128(fp128, metadata, metadata)
-declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
 declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
diff --git a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
index 8bbc6247dbafd6..a612c6a80e31e9 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
@@ -729,7 +729,7 @@ define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
 ; X64-NEXT:    addq $24, %rsp
 ; X64-NEXT:    retq
 entry:
-  %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
+  %trunc = call x86_fp80 @llvm.trunc.f80(x86_fp80 %x) #0 [ "fpe.except"(metadata !"strict") ]
   ret x86_fp80 %trunc
 }
 
@@ -862,7 +862,6 @@ declare x86_fp80 @llvm.experimental.constrained.atan.f80(x86_fp80, metadata, met
 declare x86_fp80 @llvm.experimental.constrained.atan2.f80(x86_fp80, x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.tanh.f80(x86_fp80, metadata, metadata)
-declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata)
 declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata)
diff --git a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
index a2e02508327c81..e9f6cf3de8ad48 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll
@@ -14,7 +14,6 @@ declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x doubl
 declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
 declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
 declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
-declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
 declare <16 x half> @llvm.experimental.constrained.rint.v16f16(<16 x half>, metadata, metadata)
 declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)
 
@@ -160,8 +159,7 @@ define <16 x half> @ftruncv16f16(<16 x half> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleph $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x half> @llvm.experimental.constrained.trunc.v16f16(
-                          <16 x half> %f, metadata !"fpexcept.strict") #0
+  %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <16 x half> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll
index 5945e6c1bc66eb..d89996db74288f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-256.ll
@@ -22,8 +22,6 @@ declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metad
 declare <4 x double>  @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
-declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <8 x float> @llvm.experimental.constrained.rint.v8f32(<8 x float>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
 declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
@@ -234,8 +232,7 @@ define <8 x float> @ftruncv8f32(<8 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundps $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x float> @llvm.experimental.constrained.trunc.v8f32(
-                          <8 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x float> %res
 }
 
@@ -244,8 +241,7 @@ define <4 x double> @ftruncv4f64(<4 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundpd $11, %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(
-                        <4 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
index dfbc11a43d3d7c..dac0195543ac7f 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll
@@ -14,7 +14,6 @@ declare <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x f
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata)
-declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 declare <32 x half> @llvm.experimental.constrained.rint.v32f16(<32 x half>, metadata, metadata)
 declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata)
 
@@ -155,7 +154,7 @@ define <32 x half> @strict_vector_ftrunc_v32f16(<32 x half> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleph $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
+  %res = call <32 x half> @llvm.trunc.v32f16(<32 x half> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <32 x half> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-512.ll b/llvm/test/CodeGen/X86/vec-strict-512.ll
index 2cafd74af49538..43abaaa3d7d8ff 100644
--- a/llvm/test/CodeGen/X86/vec-strict-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-512.ll
@@ -20,8 +20,6 @@ declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, me
 declare <8 x double>  @llvm.experimental.constrained.ceil.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
 declare <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double>, metadata)
-declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
-declare <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double>, metadata)
 declare <16 x float> @llvm.experimental.constrained.rint.v16f32(<16 x float>, metadata, metadata)
 declare <8 x double> @llvm.experimental.constrained.rint.v8f64(<8 x double>, metadata, metadata)
 declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata)
@@ -227,7 +225,7 @@ define <16 x float> @strict_vector_ftrunc_v16f32(<16 x float> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <16 x float> @llvm.trunc.v16f32(<16 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <16 x float> %res
 }
 
@@ -236,7 +234,7 @@ define <8 x double> @strict_vector_ftrunc_v8f64(<8 x double> %f) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
-  %res = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <8 x double> @llvm.trunc.v8f64(<8 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <8 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vec-strict-round-128.ll b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
index 1f7507cc02bc59..403731057618e6 100644
--- a/llvm/test/CodeGen/X86/vec-strict-round-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-round-128.ll
@@ -10,8 +10,6 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
 declare <2 x double>  @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
-declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
@@ -87,8 +85,7 @@ define <4 x float> @ftruncv4f32(<4 x float> %f) #0 {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundps $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
-  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
-                          <4 x float> %f, metadata !"fpexcept.strict") #0
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <4 x float> %res
 }
 
@@ -102,8 +99,7 @@ define <2 x double> @ftruncv2f64(<2 x double> %f) #0 {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vroundpd $11, %xmm0, %xmm0
 ; AVX-NEXT:    ret{{[l|q]}}
-  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                        <2 x double> %f, metadata !"fpexcept.strict") #0
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %f) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %res
 }
 
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 49062eaef31887..e7bb0744b86d46 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -6372,9 +6372,7 @@ define <1 x float> @constrained_vector_trunc_v1f32_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <1 x float>, ptr %a
-  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
-                               <1 x float> %b,
-                               metadata !"fpexcept.strict") #0
+  %trunc = call <1 x float> @llvm.trunc.v1f32(<1 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <1 x float> %trunc
 }
 
@@ -6403,9 +6401,7 @@ define <2 x double> @constrained_vector_trunc_v2f64_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <2 x double>, ptr %a
-  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
-                                <2 x double> %b,
-                                metadata !"fpexcept.strict") #0
+  %trunc = call <2 x double> @llvm.trunc.v2f64(<2 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <2 x double> %trunc
 }
 
@@ -6446,9 +6442,7 @@ define <3 x float> @constrained_vector_trunc_v3f32_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <3 x float>, ptr %a
-  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
-                              <3 x float> %b,
-                              metadata !"fpexcept.strict") #0
+  %trunc = call <3 x float> @llvm.trunc.v3f32(<3 x float> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x float> %trunc
 }
 
@@ -6490,9 +6484,7 @@ define <3 x double> @constrained_vector_trunc_v3f64_var(ptr %a) #0 {
 ; AVX-NEXT:    retq
 entry:
   %b = load <3 x double>, ptr %a
-  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
-                          <3 x double> %b,
-                          metadata !"fpexcept.strict") #0
+  %trunc = call <3 x double> @llvm.trunc.v3f64(<3 x double> %b) #0 [ "fpe.except"(metadata !"strict") ]
   ret <3 x double> %trunc
 }
 
@@ -9975,7 +9967,6 @@ declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
-declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
@@ -10025,7 +10016,6 @@ declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
-declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
@@ -10104,8 +10094,6 @@ declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, meta
 declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata)
 declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata)
 declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata)
-declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata)
-declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sitofp.v3f32.v3i32(<3 x i32>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sitofp.v3f64.v3i64(<3 x i64>, metadata, metadata)
@@ -10156,7 +10144,6 @@ declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
-declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
diff --git a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
index a9ef7f6a765d19..64d8e1d2454db2 100644
--- a/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
+++ b/llvm/test/Transforms/InstSimplify/constfold-constrained.ll
@@ -17,7 +17,7 @@ entry:
 define double @floor_02() #0 {
 ; CHECK-LABEL: @floor_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.floor.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -40,7 +40,7 @@ entry:
 define double @ceil_02() #0 {
 ; CHECK-LABEL: @ceil_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.ceil.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -55,7 +55,7 @@ define double @trunc_01() #0 {
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
-  %result = call double @llvm.experimental.constrained.trunc.f64(double 1.010000e+01, metadata !"fpexcept.ignore") #0
+  %result = call double @llvm.trunc.f64(double 1.010000e+01) #0 [ "fpe.except"(metadata !"ignore") ]
   ret double %result
 }
 
@@ -63,7 +63,7 @@ entry:
 define double @trunc_02() #0 {
 ; CHECK-LABEL: @trunc_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double -1.010000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double -1.010000e+01) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.000000e+01
 ;
 entry:
@@ -86,7 +86,7 @@ entry:
 define double @round_02() #0 {
 ; CHECK-LABEL: @round_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.round.f64(double -1.050000e+01, metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double -1.100000e+01
 ;
 entry:
@@ -120,7 +120,7 @@ entry:
 define double @nearbyint_03() #0 {
 ; CHECK-LABEL: @nearbyint_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.towardzero", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -132,7 +132,7 @@ entry:
 define double @nearbyint_04() #0 {
 ; CHECK-LABEL: @nearbyint_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -144,7 +144,7 @@ entry:
 define double @nearbyint_05() #0 {
 ; CHECK-LABEL: @nearbyint_05(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.nearbyint.f64(double 1.050000e+01, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -156,7 +156,7 @@ entry:
 define double @nonfinite_01() #0 {
 ; CHECK-LABEL: @nonfinite_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF4000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF4000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -168,10 +168,10 @@ entry:
 define double @nonfinite_02() #0 {
 ; CHECK-LABEL: @nonfinite_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    ret double 0x7FF8000000000000
+; CHECK-NEXT:    ret double 0x7FFC000000000000
 ;
 entry:
-  %result = call double @llvm.experimental.constrained.trunc.f64(double 0x7ff4000000000000, metadata !"fpexcept.ignore") #0
+  %result = call double @llvm.trunc.f64(double 0x7ff4000000000000) #0 [ "fpe.except"(metadata !"ignore") ]
   ret double %result
 }
 
@@ -179,7 +179,7 @@ entry:
 define double @nonfinite_03() #0 {
 ; CHECK-LABEL: @nonfinite_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF8000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF8000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 0x7FF8000000000000
 ;
 entry:
@@ -191,7 +191,7 @@ entry:
 define double @nonfinite_04() #0 {
 ; CHECK-LABEL: @nonfinite_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.trunc.f64(double 0x7FF0000000000000, metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.trunc.f64(double 0x7FF0000000000000) #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 0x7FF0000000000000
 ;
 entry:
@@ -203,7 +203,7 @@ entry:
 define double @rint_01() #0 {
 ; CHECK-LABEL: @rint_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.000000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 1.000000e+01
 ;
 entry:
@@ -215,7 +215,7 @@ entry:
 define double @rint_02() #0 {
 ; CHECK-LABEL: @rint_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.rint.f64(double 1.010000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -270,7 +270,7 @@ entry:
 define double @fadd_04() #0 {
 ; CHECK-LABEL: @fadd_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -282,7 +282,7 @@ entry:
 define double @fadd_05() #0 {
 ; CHECK-LABEL: @fadd_05(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 3.000000e+00
 ;
 entry:
@@ -294,7 +294,7 @@ entry:
 define double @fadd_06() #0 {
 ; CHECK-LABEL: @fadd_06(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double 3.000000e+00
 ;
 entry:
@@ -306,7 +306,7 @@ entry:
 define double @fadd_07() #0 {
 ; CHECK-LABEL: @fadd_07(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 1.000000e+00, double 0x3FF0000000000001, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR3]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -328,7 +328,7 @@ entry:
 define double @fadd_09() #0 {
 ; CHECK-LABEL: @fadd_09(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret double [[RESULT]]
 ;
 entry:
@@ -492,7 +492,7 @@ entry:
 define i1 @cmp_eq_nan_01() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_01(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -503,7 +503,7 @@ entry:
 define i1 @cmp_eq_nan_02() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_02(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF4000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -515,7 +515,7 @@ entry:
 define i1 @cmp_eq_nan_03() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_03(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 false
 ;
 entry:
@@ -526,7 +526,7 @@ entry:
 define i1 @cmp_eq_nan_04() #0 {
 ; CHECK-LABEL: @cmp_eq_nan_04(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 0x7FF8000000000000, double 1.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] [ "fpe.except"(metadata !"strict") ]
 ; CHECK-NEXT:    ret i1 [[RESULT]]
 ;
 entry:
@@ -540,7 +540,6 @@ attributes #0 = { strictfp }
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.floor.f64(double, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
-declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
 declare double @llvm.experimental.constrained.round.f64(double, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
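
A minimal before/after sketch of the rewrite pattern exercised throughout the
tests above (value names are illustrative, not taken from any single test):

  ; Old form: a dedicated constrained intrinsic, with the exception behavior
  ; passed as a metadata argument.
  %old = call double @llvm.experimental.constrained.trunc.f64(
                        double %x, metadata !"fpexcept.strict") #0

  ; New form: the regular intrinsic, with the FP environment expressed as
  ; operand bundles. "fpe.except" carries the exception behavior; operations
  ; sensitive to the rounding mode (e.g. nearbyint above) additionally carry
  ; "fpe.control" with "rte", "rtz" or "dyn".
  %new = call double @llvm.trunc.f64(double %x) #0 [ "fpe.except"(metadata !"strict") ]

  attributes #0 = { strictfp }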


