[clang] [llvm] [IR] Allow fast math flags on fptrunc and fpext (PR #115894)

John Brawn via cfe-commits cfe-commits at lists.llvm.org
Wed Nov 13 06:55:26 PST 2024


https://github.com/john-brawn-arm updated https://github.com/llvm/llvm-project/pull/115894

From 9001ad621028caa0ca2095156f28fac88deb8525 Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Wed, 6 Nov 2024 12:15:17 +0000
Subject: [PATCH 1/3] [IR] Allow fast math flags on fptrunc and fpext

This consists of:
 * Make these instructions part of FPMathOperator.
 * Adjust the bitcode/IR readers and writers to expect fast math flags on
   these instructions.
 * Make IRBuilder set the fast math flags on these instructions.
 * Adjust InstCombinerImpl::visitFPTrunc, as currently the fast math flags on
   all of the output instructions are set from the source operation. Instead we
   want each output instruction to get the fast math flags from its
   corresponding input instruction.
 * Update LangRef and the release notes.
 * Update a bunch of tests (a small IR example of the new syntax follows this
   list).
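
For reference, below is a minimal standalone sketch of the syntax this patch
enables (the function names are illustrative, not taken from the patch). Any
subset of the flags may appear, and the 'fast' keyword implies all of them:

  define float @example_ext(half %x) {
    ; Fast math flags may now appear on fpext.
    %e = fpext reassoc nnan ninf nsz arcp afn half %x to float
    ret float %e
  }

  define half @example_trunc(float %x) {
    ; 'fast' is shorthand for all of the individual flags on fptrunc.
    %t = fptrunc fast float %x to half
    ret half %t
  }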
---
 clang/test/CodeGen/X86/cx-complex-range.c     | 176 +++++++++---------
 clang/test/CodeGen/cx-complex-range.c         |  68 +++----
 .../CodeGen/matrix-type-operators-fast-math.c |  12 +-
 clang/test/CodeGen/nofpclass.c                |  60 +++---
 clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu   |   2 +-
 clang/test/CodeGenHIP/printf_nonhostcall.cpp  |   4 +-
 .../__clang_hip_math_ocml_rounded_ops.hip     |  32 ++--
 llvm/docs/LangRef.rst                         |  12 +-
 llvm/docs/ReleaseNotes.md                     |   2 +
 llvm/include/llvm/IR/IRBuilder.h              |  25 ++-
 llvm/include/llvm/IR/Operator.h               |   2 +
 llvm/lib/AsmParser/LLParser.cpp               |  12 +-
 llvm/lib/Bitcode/Reader/BitcodeReader.cpp     |   5 +
 .../InstCombine/InstCombineCasts.cpp          |   8 +-
 llvm/test/Assembler/fast-math-flags.ll        |  32 ++++
 15 files changed, 256 insertions(+), 196 deletions(-)

diff --git a/clang/test/CodeGen/X86/cx-complex-range.c b/clang/test/CodeGen/X86/cx-complex-range.c
index a0e6dc219b36f7..f87091427df713 100644
--- a/clang/test/CodeGen/X86/cx-complex-range.c
+++ b/clang/test/CodeGen/X86/cx-complex-range.c
@@ -266,14 +266,14 @@
 // BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// BASIC_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// BASIC_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// BASIC_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// BASIC_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// BASIC_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP0]], [[TMP1]]
@@ -285,8 +285,8 @@
 // BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP7]]
 // BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP2]], [[TMP5]]
 // BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP8]], [[TMP5]]
-// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP9]] to half
-// BASIC_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[TMP10]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP9]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP10]] to half
 // BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // BASIC_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -307,22 +307,22 @@
 // FULL_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// FULL_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// FULL_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// FULL_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// FULL_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// FULL_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[EXT]], float noundef nofpclass(nan inf) [[EXT1]], float noundef nofpclass(nan inf) [[EXT2]], float noundef nofpclass(nan inf) [[EXT3]]) #[[ATTR1:[0-9]+]]
 // FULL_FAST-NEXT:    store <2 x float> [[CALL]], ptr [[COERCE]], align 4
 // FULL_FAST-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
 // FULL_FAST-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
-// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[COERCE_REAL]] to half
-// FULL_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[COERCE_IMAG]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[COERCE_REAL]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[COERCE_IMAG]] to half
 // FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // FULL_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -342,14 +342,14 @@
 // IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// IMPRVD_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// IMPRVD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// IMPRVD_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// IMPRVD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// IMPRVD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[EXT2]])
 // IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[EXT3]])
 // IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP0]], [[TMP1]]
@@ -379,8 +379,8 @@
 // IMPRVD_FAST:       complex_div:
 // IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
 // IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP20]] to half
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[TMP21]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP20]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP21]] to half
 // IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -400,14 +400,14 @@
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // PRMTD_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // PRMTD_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // PRMTD_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP0]], [[TMP1]]
@@ -419,8 +419,8 @@
 // PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP7]]
 // PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP2]], [[TMP5]]
 // PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP8]], [[TMP5]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP9]] to half
-// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[TMP10]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP9]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP10]] to half
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -636,22 +636,22 @@ _Complex _Float16 divf16(_Complex _Float16 a, _Complex _Float16 b) {
 // BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// BASIC_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// BASIC_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// BASIC_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// BASIC_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// BASIC_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // BASIC_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // BASIC_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // BASIC_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT3]]
 // BASIC_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT2]]
 // BASIC_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
 // BASIC_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
-// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[MUL_R]] to half
-// BASIC_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[MUL_I]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_R]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_I]] to half
 // BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // BASIC_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -672,14 +672,14 @@ _Complex _Float16 divf16(_Complex _Float16 a, _Complex _Float16 b) {
 // FULL_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// FULL_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// FULL_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// FULL_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// FULL_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// FULL_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // FULL_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // FULL_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // FULL_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT3]]
@@ -702,8 +702,8 @@ _Complex _Float16 divf16(_Complex _Float16 a, _Complex _Float16 b) {
 // FULL_FAST:       complex_mul_cont:
 // FULL_FAST-NEXT:    [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
 // FULL_FAST-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
-// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[REAL_MUL_PHI]] to half
-// FULL_FAST-NEXT:    [[UNPROMOTION5:%.*]] = fptrunc float [[IMAG_MUL_PHI]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[REAL_MUL_PHI]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION5:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[IMAG_MUL_PHI]] to half
 // FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // FULL_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -723,22 +723,22 @@ _Complex _Float16 divf16(_Complex _Float16 a, _Complex _Float16 b) {
 // IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// IMPRVD_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// IMPRVD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// IMPRVD_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// IMPRVD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// IMPRVD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // IMPRVD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // IMPRVD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // IMPRVD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT3]]
 // IMPRVD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT2]]
 // IMPRVD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
 // IMPRVD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[MUL_R]] to half
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[MUL_I]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_R]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_I]] to half
 // IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -758,22 +758,22 @@ _Complex _Float16 divf16(_Complex _Float16 a, _Complex _Float16 b) {
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[A_REAL]] to float
-// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load half, ptr [[B_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[B]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load half, ptr [[B_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext half [[B_REAL]] to float
-// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext half [[B_IMAG]] to float
+// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_REAL]] to float
+// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[B_IMAG]] to float
 // PRMTD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT2]]
 // PRMTD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT3]]
 // PRMTD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT3]]
 // PRMTD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT1]], [[EXT2]]
 // PRMTD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
 // PRMTD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[MUL_R]] to half
-// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc float [[MUL_I]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_R]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[MUL_I]] to half
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -1158,8 +1158,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // BASIC_FAST-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // BASIC_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[CONV:%.*]] = fpext half [[C_REAL]] to x86_fp80
-// BASIC_FAST-NEXT:    [[CONV1:%.*]] = fpext half [[C_IMAG]] to x86_fp80
+// BASIC_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_REAL]] to x86_fp80
+// BASIC_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_IMAG]] to x86_fp80
 // BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV]]
 // BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV1]]
 // BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
@@ -1171,16 +1171,16 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
 // BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
 // BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
-// BASIC_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to half
-// BASIC_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to half
-// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext half [[CONV2]] to float
-// BASIC_FAST-NEXT:    [[EXT4:%.*]] = fpext half [[CONV3]] to float
+// BASIC_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]] to half
+// BASIC_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP10]] to half
+// BASIC_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV2]] to float
+// BASIC_FAST-NEXT:    [[EXT4:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV3]] to float
 // BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// BASIC_FAST-NEXT:    [[EXT5:%.*]] = fpext half [[A_REAL]] to float
-// BASIC_FAST-NEXT:    [[EXT6:%.*]] = fpext half [[A_IMAG]] to float
+// BASIC_FAST-NEXT:    [[EXT5:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// BASIC_FAST-NEXT:    [[EXT6:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // BASIC_FAST-NEXT:    [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT5]]
 // BASIC_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT4]], [[EXT6]]
 // BASIC_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP11]], [[TMP12]]
@@ -1192,8 +1192,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // BASIC_FAST-NEXT:    [[TMP19:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP17]], [[TMP18]]
 // BASIC_FAST-NEXT:    [[TMP20:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP13]], [[TMP16]]
 // BASIC_FAST-NEXT:    [[TMP21:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP19]], [[TMP16]]
-// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP20]] to half
-// BASIC_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc float [[TMP21]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP20]] to half
+// BASIC_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP21]] to half
 // BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // BASIC_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -1218,29 +1218,29 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // FULL_FAST-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // FULL_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[CONV:%.*]] = fpext half [[C_REAL]] to x86_fp80
-// FULL_FAST-NEXT:    [[CONV1:%.*]] = fpext half [[C_IMAG]] to x86_fp80
+// FULL_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_REAL]] to x86_fp80
+// FULL_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_IMAG]] to x86_fp80
 // FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[CONV]], x86_fp80 noundef nofpclass(nan inf) [[CONV1]]) #[[ATTR1]]
 // FULL_FAST-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
 // FULL_FAST-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
-// FULL_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to half
-// FULL_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to half
-// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext half [[CONV2]] to float
-// FULL_FAST-NEXT:    [[EXT4:%.*]] = fpext half [[CONV3]] to float
+// FULL_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]] to half
+// FULL_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP1]] to half
+// FULL_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV2]] to float
+// FULL_FAST-NEXT:    [[EXT4:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV3]] to float
 // FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// FULL_FAST-NEXT:    [[EXT5:%.*]] = fpext half [[A_REAL]] to float
-// FULL_FAST-NEXT:    [[EXT6:%.*]] = fpext half [[A_IMAG]] to float
+// FULL_FAST-NEXT:    [[EXT5:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// FULL_FAST-NEXT:    [[EXT6:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // FULL_FAST-NEXT:    [[CALL7:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[EXT]], float noundef nofpclass(nan inf) [[EXT4]], float noundef nofpclass(nan inf) [[EXT5]], float noundef nofpclass(nan inf) [[EXT6]]) #[[ATTR1]]
 // FULL_FAST-NEXT:    store <2 x float> [[CALL7]], ptr [[COERCE]], align 4
 // FULL_FAST-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
 // FULL_FAST-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
-// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[COERCE_REAL]] to half
-// FULL_FAST-NEXT:    [[UNPROMOTION8:%.*]] = fptrunc float [[COERCE_IMAG]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[COERCE_REAL]] to half
+// FULL_FAST-NEXT:    [[UNPROMOTION8:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[COERCE_IMAG]] to half
 // FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // FULL_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -1264,8 +1264,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // IMPRVD_FAST-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[CONV:%.*]] = fpext half [[C_REAL]] to x86_fp80
-// IMPRVD_FAST-NEXT:    [[CONV1:%.*]] = fpext half [[C_IMAG]] to x86_fp80
+// IMPRVD_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_REAL]] to x86_fp80
+// IMPRVD_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_IMAG]] to x86_fp80
 // IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
 // IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
 // IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
@@ -1295,16 +1295,16 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // IMPRVD_FAST:       complex_div:
 // IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
 // IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
-// IMPRVD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to half
-// IMPRVD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to half
-// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[CONV2]] to float
-// IMPRVD_FAST-NEXT:    [[EXT4:%.*]] = fpext half [[CONV3]] to float
+// IMPRVD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP20]] to half
+// IMPRVD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP21]] to half
+// IMPRVD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV2]] to float
+// IMPRVD_FAST-NEXT:    [[EXT4:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV3]] to float
 // IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// IMPRVD_FAST-NEXT:    [[EXT5:%.*]] = fpext half [[A_REAL]] to float
-// IMPRVD_FAST-NEXT:    [[EXT6:%.*]] = fpext half [[A_IMAG]] to float
+// IMPRVD_FAST-NEXT:    [[EXT5:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// IMPRVD_FAST-NEXT:    [[EXT6:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // IMPRVD_FAST-NEXT:    [[TMP22:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[EXT5]])
 // IMPRVD_FAST-NEXT:    [[TMP23:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[EXT6]])
 // IMPRVD_FAST-NEXT:    [[ABS_CMP7:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP22]], [[TMP23]]
@@ -1334,8 +1334,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // IMPRVD_FAST:       complex_div10:
 // IMPRVD_FAST-NEXT:    [[TMP42:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP29]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI8]] ], [ [[TMP38]], [[ABS_RHSR_LESS_THAN_ABS_RHSI9]] ]
 // IMPRVD_FAST-NEXT:    [[TMP43:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP32]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI8]] ], [ [[TMP41]], [[ABS_RHSR_LESS_THAN_ABS_RHSI9]] ]
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP42]] to half
-// IMPRVD_FAST-NEXT:    [[UNPROMOTION11:%.*]] = fptrunc float [[TMP43]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP42]] to half
+// IMPRVD_FAST-NEXT:    [[UNPROMOTION11:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP43]] to half
 // IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -1359,8 +1359,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // PRMTD_FAST-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[CONV:%.*]] = fpext half [[C_REAL]] to x86_fp80
-// PRMTD_FAST-NEXT:    [[CONV1:%.*]] = fpext half [[C_IMAG]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_REAL]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[C_IMAG]] to x86_fp80
 // PRMTD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
 // PRMTD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
 // PRMTD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
@@ -1390,16 +1390,16 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // PRMTD_FAST:       complex_div:
 // PRMTD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
 // PRMTD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
-// PRMTD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to half
-// PRMTD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to half
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext half [[CONV2]] to float
-// PRMTD_FAST-NEXT:    [[EXT4:%.*]] = fpext half [[CONV3]] to float
+// PRMTD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP20]] to half
+// PRMTD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP21]] to half
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV2]] to float
+// PRMTD_FAST-NEXT:    [[EXT4:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[CONV3]] to float
 // PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
-// PRMTD_FAST-NEXT:    [[EXT5:%.*]] = fpext half [[A_REAL]] to float
-// PRMTD_FAST-NEXT:    [[EXT6:%.*]] = fpext half [[A_IMAG]] to float
+// PRMTD_FAST-NEXT:    [[EXT5:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_REAL]] to float
+// PRMTD_FAST-NEXT:    [[EXT6:%.*]] = fpext reassoc nnan ninf nsz arcp afn half [[A_IMAG]] to float
 // PRMTD_FAST-NEXT:    [[TMP22:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT]], [[EXT5]]
 // PRMTD_FAST-NEXT:    [[TMP23:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[EXT4]], [[EXT6]]
 // PRMTD_FAST-NEXT:    [[TMP24:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP22]], [[TMP23]]
@@ -1411,8 +1411,8 @@ _Complex _Float16 mulf16(_Complex _Float16 a, _Complex _Float16 b) {
 // PRMTD_FAST-NEXT:    [[TMP30:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP28]], [[TMP29]]
 // PRMTD_FAST-NEXT:    [[TMP31:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP24]], [[TMP27]]
 // PRMTD_FAST-NEXT:    [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP30]], [[TMP27]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[TMP31]] to half
-// PRMTD_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc float [[TMP32]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP31]] to half
+// PRMTD_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn float [[TMP32]] to half
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
diff --git a/clang/test/CodeGen/cx-complex-range.c b/clang/test/CodeGen/cx-complex-range.c
index b2259031d75639..88300041061aae 100644
--- a/clang/test/CodeGen/cx-complex-range.c
+++ b/clang/test/CodeGen/cx-complex-range.c
@@ -485,14 +485,14 @@
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext float [[A_REAL]] to double
-// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[A_REAL]] to double
+// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[A_IMAG]] to double
 // PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
 // PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
-// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext float [[B_REAL]] to double
-// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
+// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[B_REAL]] to double
+// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[B_IMAG]] to double
 // PRMTD_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT2]]
 // PRMTD_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT1]], [[EXT3]]
 // PRMTD_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP0]], [[TMP1]]
@@ -504,8 +504,8 @@
 // PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP6]], [[TMP7]]
 // PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP2]], [[TMP5]]
 // PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP8]], [[TMP5]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
-// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP9]] to float
+// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP10]] to float
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
@@ -1530,14 +1530,14 @@ _Complex float mulf(_Complex float a, _Complex float b) {
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext double [[A_REAL]] to x86_fp80
-// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext double [[A_IMAG]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn double [[A_REAL]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext reassoc nnan ninf nsz arcp afn double [[A_IMAG]] to x86_fp80
 // PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
 // PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
-// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext double [[B_REAL]] to x86_fp80
-// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext double [[B_IMAG]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext reassoc nnan ninf nsz arcp afn double [[B_REAL]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext reassoc nnan ninf nsz arcp afn double [[B_IMAG]] to x86_fp80
 // PRMTD_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT]], [[EXT2]]
 // PRMTD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT1]], [[EXT3]]
 // PRMTD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP4]], [[TMP5]]
@@ -1549,8 +1549,8 @@ _Complex float mulf(_Complex float a, _Complex float b) {
 // PRMTD_FAST-NEXT:    [[TMP12:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP10]], [[TMP11]]
 // PRMTD_FAST-NEXT:    [[TMP13:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP9]]
 // PRMTD_FAST-NEXT:    [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP12]], [[TMP9]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc x86_fp80 [[TMP13]] to double
-// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc x86_fp80 [[TMP14]] to double
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP13]] to double
+// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]] to double
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
@@ -3697,8 +3697,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // BASIC_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
 // BASIC_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
 // BASIC_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
-// BASIC_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
-// BASIC_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
+// BASIC_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_REAL]] to x86_fp80
+// BASIC_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_IMAG]] to x86_fp80
 // BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV]]
 // BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV1]]
 // BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
@@ -3710,8 +3710,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
 // BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
 // BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
-// BASIC_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to float
-// BASIC_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to float
+// BASIC_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]] to float
+// BASIC_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP10]] to float
 // BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
 // BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
 // BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
@@ -3751,13 +3751,13 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // FULL_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
 // FULL_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
 // FULL_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
-// FULL_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
-// FULL_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
+// FULL_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_REAL]] to x86_fp80
+// FULL_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_IMAG]] to x86_fp80
 // FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[CONV]], x86_fp80 noundef nofpclass(nan inf) [[CONV1]]) #[[ATTR2]]
 // FULL_FAST-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
 // FULL_FAST-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
-// FULL_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to float
-// FULL_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to float
+// FULL_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]] to float
+// FULL_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP1]] to float
 // FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
 // FULL_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
 // FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
@@ -3791,8 +3791,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // IMPRVD_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
 // IMPRVD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
 // IMPRVD_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
-// IMPRVD_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
-// IMPRVD_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
+// IMPRVD_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_REAL]] to x86_fp80
+// IMPRVD_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_IMAG]] to x86_fp80
 // IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
 // IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
 // IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
@@ -3822,8 +3822,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // IMPRVD_FAST:       complex_div:
 // IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
 // IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
-// IMPRVD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
-// IMPRVD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
+// IMPRVD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP20]] to float
+// IMPRVD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP21]] to float
 // IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
 // IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
 // IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
@@ -3880,8 +3880,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // PRMTD_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
 // PRMTD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
-// PRMTD_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
-// PRMTD_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[CONV:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_REAL]] to x86_fp80
+// PRMTD_FAST-NEXT:    [[CONV1:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[C_IMAG]] to x86_fp80
 // PRMTD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
 // PRMTD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
 // PRMTD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
@@ -3911,16 +3911,16 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // PRMTD_FAST:       complex_div:
 // PRMTD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
 // PRMTD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
-// PRMTD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
-// PRMTD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
-// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext float [[CONV2]] to double
-// PRMTD_FAST-NEXT:    [[EXT4:%.*]] = fpext float [[CONV3]] to double
+// PRMTD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP20]] to float
+// PRMTD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP21]] to float
+// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[CONV2]] to double
+// PRMTD_FAST-NEXT:    [[EXT4:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[CONV3]] to double
 // PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
 // PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
-// PRMTD_FAST-NEXT:    [[EXT5:%.*]] = fpext float [[A_REAL]] to double
-// PRMTD_FAST-NEXT:    [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
+// PRMTD_FAST-NEXT:    [[EXT5:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[A_REAL]] to double
+// PRMTD_FAST-NEXT:    [[EXT6:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[A_IMAG]] to double
 // PRMTD_FAST-NEXT:    [[TMP22:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT5]]
 // PRMTD_FAST-NEXT:    [[TMP23:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT4]], [[EXT6]]
 // PRMTD_FAST-NEXT:    [[TMP24:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP22]], [[TMP23]]
@@ -3932,8 +3932,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
 // PRMTD_FAST-NEXT:    [[TMP30:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP28]], [[TMP29]]
 // PRMTD_FAST-NEXT:    [[TMP31:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP24]], [[TMP27]]
 // PRMTD_FAST-NEXT:    [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP30]], [[TMP27]]
-// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
-// PRMTD_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
+// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP31]] to float
+// PRMTD_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[TMP32]] to float
 // PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
 // PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
 // PRMTD_FAST-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
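
The complex-division checks above show the end-to-end effect under full fast math: every promotion into the wider computation type and every truncation back now carries the complete flag set. Reduced to a minimal sketch (value names here are illustrative, not taken from the test):

    %ext  = fpext reassoc nnan ninf nsz arcp afn float %re to double
    %quot = fdiv reassoc nnan ninf nsz arcp afn double %ext, %d
    %res  = fptrunc reassoc nnan ninf nsz arcp afn double %quot to float
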
diff --git a/clang/test/CodeGen/matrix-type-operators-fast-math.c b/clang/test/CodeGen/matrix-type-operators-fast-math.c
index 724b2828302861..a5cb2ffdabcc5a 100644
--- a/clang/test/CodeGen/matrix-type-operators-fast-math.c
+++ b/clang/test/CodeGen/matrix-type-operators-fast-math.c
@@ -41,7 +41,7 @@ void add_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_double_float(<25 x double> noundef nofpclass(nan inf) %a, float noundef nofpclass(nan inf) %vf)
   // CHECK:       [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4
-  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
+  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[SCALAR]] to double
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd reassoc nnan ninf nsz arcp afn <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
@@ -53,7 +53,7 @@ void add_matrix_scalar_double_float(dx5x5_t a, float vf) {
 void add_compound_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_double_float(<25 x double> noundef nofpclass(nan inf) %a, float noundef nofpclass(nan inf) %vf)
   // CHECK:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4
-  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
+  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[SCALAR]] to double
   // CHECK-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
@@ -66,7 +66,7 @@ void add_compound_matrix_scalar_double_float(dx5x5_t a, float vf) {
 void subtract_compound_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_double_float(<25 x double> noundef nofpclass(nan inf) %a, float noundef nofpclass(nan inf) %vf)
   // CHECK:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4
-  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
+  // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[SCALAR]] to double
   // CHECK-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
@@ -104,7 +104,7 @@ void multiply_compound_matrix_matrix_double(dx5x5_t b, dx5x5_t c) {
 // CHECK-LABEL: @multiply_double_matrix_scalar_float(
 // CHECK:         [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8
 // CHECK-NEXT:    [[S:%.*]] = load float, ptr %s.addr, align 4
-// CHECK-NEXT:    [[S_EXT:%.*]] = fpext float [[S]] to double
+// CHECK-NEXT:    [[S_EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[S]] to double
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul reassoc nnan ninf nsz arcp afn <25 x double> [[A]], [[VECSPLAT]]
@@ -117,7 +117,7 @@ void multiply_double_matrix_scalar_float(dx5x5_t a, float s) {
 
 // CHECK-LABEL: @multiply_compound_double_matrix_scalar_float
 // CHECK:         [[S:%.*]] = load float, ptr %s.addr, align 4
-// CHECK-NEXT:    [[S_EXT:%.*]] = fpext float [[S]] to double
+// CHECK-NEXT:    [[S_EXT:%.*]] = fpext reassoc nnan ninf nsz arcp afn float [[S]] to double
 // CHECK-NEXT:    [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
@@ -132,7 +132,7 @@ void multiply_compound_double_matrix_scalar_float(dx5x5_t a, float s) {
 // CHECK-LABEL: @divide_float_matrix_scalar_double(
 // CHECK:         [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4
 // CHECK-NEXT:    [[S:%.*]] = load double, ptr %s.addr, align 8
-// CHECK-NEXT:    [[S_TRUNC:%.*]] = fptrunc double [[S]] to float
+// CHECK-NEXT:    [[S_TRUNC:%.*]] = fptrunc reassoc nnan ninf nsz arcp afn double [[S]] to float
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <6 x float> poison, float [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <6 x float> [[VECINSERT]], <6 x float> poison, <6 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fdiv reassoc nnan ninf nsz arcp afn <6 x float> [[MAT]], [[VECSPLAT]]
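
The matrix operators behave the same way: the scalar conversion feeding the splat now inherits the builder's flags, so the whole chain is uniformly flagged. In isolation (a sketch; the names are placeholders):

    %s.ext = fpext reassoc nnan ninf nsz arcp afn float %s to double
    %ins   = insertelement <25 x double> poison, double %s.ext, i64 0
    %splat = shufflevector <25 x double> %ins, <25 x double> poison, <25 x i32> zeroinitializer
    %res   = fmul reassoc nnan ninf nsz arcp afn <25 x double> %m, %splat
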
diff --git a/clang/test/CodeGen/nofpclass.c b/clang/test/CodeGen/nofpclass.c
index d9b34c8e383f99..75aa0318421de1 100644
--- a/clang/test/CodeGen/nofpclass.c
+++ b/clang/test/CodeGen/nofpclass.c
@@ -679,14 +679,14 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // CFINITEONLY-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // CFINITEONLY-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // CFINITEONLY-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// CFINITEONLY-NEXT:    [[EXT:%.*]] = fpext half [[C_REAL]] to float
-// CFINITEONLY-NEXT:    [[EXT1:%.*]] = fpext half [[C_IMAG]] to float
+// CFINITEONLY-NEXT:    [[EXT:%.*]] = fpext nnan ninf half [[C_REAL]] to float
+// CFINITEONLY-NEXT:    [[EXT1:%.*]] = fpext nnan ninf half [[C_IMAG]] to float
 // CFINITEONLY-NEXT:    [[C_REALP2:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
 // CFINITEONLY-NEXT:    [[C_REAL3:%.*]] = load half, ptr [[C_REALP2]], align 2
 // CFINITEONLY-NEXT:    [[C_IMAGP4:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // CFINITEONLY-NEXT:    [[C_IMAG5:%.*]] = load half, ptr [[C_IMAGP4]], align 2
-// CFINITEONLY-NEXT:    [[EXT6:%.*]] = fpext half [[C_REAL3]] to float
-// CFINITEONLY-NEXT:    [[EXT7:%.*]] = fpext half [[C_IMAG5]] to float
+// CFINITEONLY-NEXT:    [[EXT6:%.*]] = fpext nnan ninf half [[C_REAL3]] to float
+// CFINITEONLY-NEXT:    [[EXT7:%.*]] = fpext nnan ninf half [[C_IMAG5]] to float
 // CFINITEONLY-NEXT:    [[MUL_AC:%.*]] = fmul nnan ninf float [[EXT]], [[EXT6]]
 // CFINITEONLY-NEXT:    [[MUL_BD:%.*]] = fmul nnan ninf float [[EXT1]], [[EXT7]]
 // CFINITEONLY-NEXT:    [[MUL_AD:%.*]] = fmul nnan ninf float [[EXT]], [[EXT7]]
@@ -709,8 +709,8 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // CFINITEONLY:       complex_mul_cont:
 // CFINITEONLY-NEXT:    [[REAL_MUL_PHI:%.*]] = phi nnan ninf float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
 // CFINITEONLY-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi nnan ninf float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
-// CFINITEONLY-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[REAL_MUL_PHI]] to half
-// CFINITEONLY-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc float [[IMAG_MUL_PHI]] to half
+// CFINITEONLY-NEXT:    [[UNPROMOTION:%.*]] = fptrunc nnan ninf float [[REAL_MUL_PHI]] to half
+// CFINITEONLY-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc nnan ninf float [[IMAG_MUL_PHI]] to half
 // CFINITEONLY-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // CFINITEONLY-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // CFINITEONLY-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -723,16 +723,16 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // CLFINITEONLY-SAME: (<2 x half> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) local_unnamed_addr #[[ATTR7:[0-9]+]] {
 // CLFINITEONLY-NEXT:  entry:
 // CLFINITEONLY-NEXT:    [[C_SROA_0_0_VEC_EXTRACT:%.*]] = extractelement <2 x half> [[C_COERCE]], i64 0
-// CLFINITEONLY-NEXT:    [[EXT:%.*]] = fpext half [[C_SROA_0_0_VEC_EXTRACT]] to float
+// CLFINITEONLY-NEXT:    [[EXT:%.*]] = fpext nnan ninf half [[C_SROA_0_0_VEC_EXTRACT]] to float
 // CLFINITEONLY-NEXT:    [[C_SROA_0_2_VEC_EXTRACT:%.*]] = extractelement <2 x half> [[C_COERCE]], i64 1
-// CLFINITEONLY-NEXT:    [[EXT1:%.*]] = fpext half [[C_SROA_0_2_VEC_EXTRACT]] to float
+// CLFINITEONLY-NEXT:    [[EXT1:%.*]] = fpext nnan ninf half [[C_SROA_0_2_VEC_EXTRACT]] to float
 // CLFINITEONLY-NEXT:    [[MUL_AD:%.*]] = fmul nnan ninf float [[EXT]], [[EXT1]]
 // CLFINITEONLY-NEXT:    [[MUL_I:%.*]] = fadd nnan ninf float [[MUL_AD]], [[MUL_AD]]
 // CLFINITEONLY-NEXT:    [[MUL_AC:%.*]] = fmul nnan ninf float [[EXT]], [[EXT]]
 // CLFINITEONLY-NEXT:    [[MUL_BD:%.*]] = fmul nnan ninf float [[EXT1]], [[EXT1]]
 // CLFINITEONLY-NEXT:    [[MUL_R:%.*]] = fsub nnan ninf float [[MUL_AC]], [[MUL_BD]]
-// CLFINITEONLY-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[MUL_R]] to half
-// CLFINITEONLY-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc float [[MUL_I]] to half
+// CLFINITEONLY-NEXT:    [[UNPROMOTION:%.*]] = fptrunc nnan ninf float [[MUL_R]] to half
+// CLFINITEONLY-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc nnan ninf float [[MUL_I]] to half
 // CLFINITEONLY-NEXT:    [[RETVAL_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[UNPROMOTION]], i64 0
 // CLFINITEONLY-NEXT:    [[RETVAL_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[RETVAL_SROA_0_0_VEC_INSERT]], half [[UNPROMOTION9]], i64 1
 // CLFINITEONLY-NEXT:    ret <2 x half> [[RETVAL_SROA_0_2_VEC_INSERT]]
@@ -749,14 +749,14 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // NONANS-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // NONANS-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // NONANS-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// NONANS-NEXT:    [[EXT:%.*]] = fpext half [[C_REAL]] to float
-// NONANS-NEXT:    [[EXT1:%.*]] = fpext half [[C_IMAG]] to float
+// NONANS-NEXT:    [[EXT:%.*]] = fpext nnan half [[C_REAL]] to float
+// NONANS-NEXT:    [[EXT1:%.*]] = fpext nnan half [[C_IMAG]] to float
 // NONANS-NEXT:    [[C_REALP2:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
 // NONANS-NEXT:    [[C_REAL3:%.*]] = load half, ptr [[C_REALP2]], align 2
 // NONANS-NEXT:    [[C_IMAGP4:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // NONANS-NEXT:    [[C_IMAG5:%.*]] = load half, ptr [[C_IMAGP4]], align 2
-// NONANS-NEXT:    [[EXT6:%.*]] = fpext half [[C_REAL3]] to float
-// NONANS-NEXT:    [[EXT7:%.*]] = fpext half [[C_IMAG5]] to float
+// NONANS-NEXT:    [[EXT6:%.*]] = fpext nnan half [[C_REAL3]] to float
+// NONANS-NEXT:    [[EXT7:%.*]] = fpext nnan half [[C_IMAG5]] to float
 // NONANS-NEXT:    [[MUL_AC:%.*]] = fmul nnan float [[EXT]], [[EXT6]]
 // NONANS-NEXT:    [[MUL_BD:%.*]] = fmul nnan float [[EXT1]], [[EXT7]]
 // NONANS-NEXT:    [[MUL_AD:%.*]] = fmul nnan float [[EXT]], [[EXT7]]
@@ -779,8 +779,8 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // NONANS:       complex_mul_cont:
 // NONANS-NEXT:    [[REAL_MUL_PHI:%.*]] = phi nnan float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
 // NONANS-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi nnan float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
-// NONANS-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[REAL_MUL_PHI]] to half
-// NONANS-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc float [[IMAG_MUL_PHI]] to half
+// NONANS-NEXT:    [[UNPROMOTION:%.*]] = fptrunc nnan float [[REAL_MUL_PHI]] to half
+// NONANS-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc nnan float [[IMAG_MUL_PHI]] to half
 // NONANS-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // NONANS-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // NONANS-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -800,14 +800,14 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // NOINFS-NEXT:    [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
 // NOINFS-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // NOINFS-NEXT:    [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
-// NOINFS-NEXT:    [[EXT:%.*]] = fpext half [[C_REAL]] to float
-// NOINFS-NEXT:    [[EXT1:%.*]] = fpext half [[C_IMAG]] to float
+// NOINFS-NEXT:    [[EXT:%.*]] = fpext ninf half [[C_REAL]] to float
+// NOINFS-NEXT:    [[EXT1:%.*]] = fpext ninf half [[C_IMAG]] to float
 // NOINFS-NEXT:    [[C_REALP2:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
 // NOINFS-NEXT:    [[C_REAL3:%.*]] = load half, ptr [[C_REALP2]], align 2
 // NOINFS-NEXT:    [[C_IMAGP4:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
 // NOINFS-NEXT:    [[C_IMAG5:%.*]] = load half, ptr [[C_IMAGP4]], align 2
-// NOINFS-NEXT:    [[EXT6:%.*]] = fpext half [[C_REAL3]] to float
-// NOINFS-NEXT:    [[EXT7:%.*]] = fpext half [[C_IMAG5]] to float
+// NOINFS-NEXT:    [[EXT6:%.*]] = fpext ninf half [[C_REAL3]] to float
+// NOINFS-NEXT:    [[EXT7:%.*]] = fpext ninf half [[C_IMAG5]] to float
 // NOINFS-NEXT:    [[MUL_AC:%.*]] = fmul ninf float [[EXT]], [[EXT6]]
 // NOINFS-NEXT:    [[MUL_BD:%.*]] = fmul ninf float [[EXT1]], [[EXT7]]
 // NOINFS-NEXT:    [[MUL_AD:%.*]] = fmul ninf float [[EXT]], [[EXT7]]
@@ -830,8 +830,8 @@ _Complex double defined_complex_func_f64_ret(_Complex double c) {
 // NOINFS:       complex_mul_cont:
 // NOINFS-NEXT:    [[REAL_MUL_PHI:%.*]] = phi ninf float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
 // NOINFS-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi ninf float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
-// NOINFS-NEXT:    [[UNPROMOTION:%.*]] = fptrunc float [[REAL_MUL_PHI]] to half
-// NOINFS-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc float [[IMAG_MUL_PHI]] to half
+// NOINFS-NEXT:    [[UNPROMOTION:%.*]] = fptrunc ninf float [[REAL_MUL_PHI]] to half
+// NOINFS-NEXT:    [[UNPROMOTION9:%.*]] = fptrunc ninf float [[IMAG_MUL_PHI]] to half
 // NOINFS-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 0
 // NOINFS-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[RETVAL]], i32 0, i32 1
 // NOINFS-NEXT:    store half [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 2
@@ -879,7 +879,7 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
 // CFINITEONLY-NEXT:    store <2 x half> [[V2F162]], ptr [[V2F16_ADDR]], align 4
 // CFINITEONLY-NEXT:    [[TMP2:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // CFINITEONLY-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// CFINITEONLY-NEXT:    [[CONV:%.*]] = fpext float [[TMP3]] to double
+// CFINITEONLY-NEXT:    [[CONV:%.*]] = fpext nnan ninf float [[TMP3]] to double
 // CFINITEONLY-NEXT:    [[TMP4:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // CFINITEONLY-NEXT:    [[TMP5:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // CFINITEONLY-NEXT:    [[TMP6:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
@@ -923,7 +923,7 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
 // CLFINITEONLY-SAME: (float noundef nofpclass(nan inf) [[F32:%.*]], double noundef nofpclass(nan inf) [[F64:%.*]], half noundef nofpclass(nan inf) [[F16:%.*]], double noundef nofpclass(nan inf) [[V2F32_COERCE:%.*]], <2 x double> noundef nofpclass(nan inf) [[V2F64:%.*]], i32 noundef [[V2F16_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE:%.*]], double noundef nofpclass(nan inf) [[CF64_COERCE0:%.*]], double noundef nofpclass(nan inf) [[CF64_COERCE1:%.*]], ptr nocapture noundef readonly byval({ half, half }) align 8 [[CF16:%.*]]) local_unnamed_addr #[[ATTR5]] {
 // CLFINITEONLY-NEXT:  entry:
 // CLFINITEONLY-NEXT:    [[BYVAL_TEMP:%.*]] = alloca { double, double }, align 8
-// CLFINITEONLY-NEXT:    [[CONV:%.*]] = fpext float [[F32]] to double
+// CLFINITEONLY-NEXT:    [[CONV:%.*]] = fpext nnan ninf float [[F32]] to double
 // CLFINITEONLY-NEXT:    [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
 // CLFINITEONLY-NEXT:    [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
 // CLFINITEONLY-NEXT:    [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
@@ -973,7 +973,7 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
 // NONANS-NEXT:    store <2 x half> [[V2F162]], ptr [[V2F16_ADDR]], align 4
 // NONANS-NEXT:    [[TMP2:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // NONANS-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// NONANS-NEXT:    [[CONV:%.*]] = fpext float [[TMP3]] to double
+// NONANS-NEXT:    [[CONV:%.*]] = fpext nnan float [[TMP3]] to double
 // NONANS-NEXT:    [[TMP4:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // NONANS-NEXT:    [[TMP5:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // NONANS-NEXT:    [[TMP6:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
@@ -1048,7 +1048,7 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
 // NOINFS-NEXT:    store <2 x half> [[V2F162]], ptr [[V2F16_ADDR]], align 4
 // NOINFS-NEXT:    [[TMP2:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // NOINFS-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// NOINFS-NEXT:    [[CONV:%.*]] = fpext float [[TMP3]] to double
+// NOINFS-NEXT:    [[CONV:%.*]] = fpext ninf float [[TMP3]] to double
 // NOINFS-NEXT:    [[TMP4:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // NOINFS-NEXT:    [[TMP5:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // NOINFS-NEXT:    [[TMP6:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
@@ -1132,7 +1132,7 @@ float call_variadic(float f32, double f64, _Float16 f16,
 // CFINITEONLY-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[FPTR_ADDR]], align 8
 // CFINITEONLY-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // CFINITEONLY-NEXT:    [[TMP4:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// CFINITEONLY-NEXT:    [[CONV:%.*]] = fpext float [[TMP4]] to double
+// CFINITEONLY-NEXT:    [[CONV:%.*]] = fpext nnan ninf float [[TMP4]] to double
 // CFINITEONLY-NEXT:    [[TMP5:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // CFINITEONLY-NEXT:    [[TMP6:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // CFINITEONLY-NEXT:    [[TMP7:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
@@ -1176,7 +1176,7 @@ float call_variadic(float f32, double f64, _Float16 f16,
 // CLFINITEONLY-SAME: (ptr nocapture noundef readonly [[FPTR:%.*]], float noundef nofpclass(nan inf) [[F32:%.*]], double noundef nofpclass(nan inf) [[F64:%.*]], half noundef nofpclass(nan inf) [[F16:%.*]], double noundef nofpclass(nan inf) [[V2F32_COERCE:%.*]], <2 x double> noundef nofpclass(nan inf) [[V2F64:%.*]], i32 noundef [[V2F16_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE:%.*]], double noundef nofpclass(nan inf) [[CF64_COERCE0:%.*]], double noundef nofpclass(nan inf) [[CF64_COERCE1:%.*]], ptr nocapture noundef readonly byval({ half, half }) align 8 [[CF16:%.*]]) local_unnamed_addr #[[ATTR5]] {
 // CLFINITEONLY-NEXT:  entry:
 // CLFINITEONLY-NEXT:    [[BYVAL_TEMP:%.*]] = alloca { double, double }, align 8
-// CLFINITEONLY-NEXT:    [[CONV:%.*]] = fpext float [[F32]] to double
+// CLFINITEONLY-NEXT:    [[CONV:%.*]] = fpext nnan ninf float [[F32]] to double
 // CLFINITEONLY-NEXT:    [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
 // CLFINITEONLY-NEXT:    [[CF16_IMAGP:%.*]] = getelementptr inbounds nuw i8, ptr [[CF16]], i64 2
 // CLFINITEONLY-NEXT:    [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
@@ -1229,7 +1229,7 @@ float call_variadic(float f32, double f64, _Float16 f16,
 // NONANS-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[FPTR_ADDR]], align 8
 // NONANS-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // NONANS-NEXT:    [[TMP4:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// NONANS-NEXT:    [[CONV:%.*]] = fpext float [[TMP4]] to double
+// NONANS-NEXT:    [[CONV:%.*]] = fpext nnan float [[TMP4]] to double
 // NONANS-NEXT:    [[TMP5:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // NONANS-NEXT:    [[TMP6:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // NONANS-NEXT:    [[TMP7:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
@@ -1307,7 +1307,7 @@ float call_variadic(float f32, double f64, _Float16 f16,
 // NOINFS-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[FPTR_ADDR]], align 8
 // NOINFS-NEXT:    [[TMP3:%.*]] = load float, ptr [[F32_ADDR]], align 4
 // NOINFS-NEXT:    [[TMP4:%.*]] = load float, ptr [[F32_ADDR]], align 4
-// NOINFS-NEXT:    [[CONV:%.*]] = fpext float [[TMP4]] to double
+// NOINFS-NEXT:    [[CONV:%.*]] = fpext ninf float [[TMP4]] to double
 // NOINFS-NEXT:    [[TMP5:%.*]] = load double, ptr [[F64_ADDR]], align 8
 // NOINFS-NEXT:    [[TMP6:%.*]] = load half, ptr [[F16_ADDR]], align 2
 // NOINFS-NEXT:    [[TMP7:%.*]] = load <2 x float>, ptr [[V2F32_ADDR]], align 8
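
These runs also demonstrate that the conversions pick up exactly the subset of flags implied by the enabled mode (finite-math-only, no-NaNs, or no-infs, per the check prefixes), not always the full fast set. Schematically, with placeholder operands:

    %a = fpext nnan ninf half %r to float   ; finite math only
    %b = fpext nnan half %r to float        ; no NaNs only
    %c = fpext ninf half %r to float        ; no infinities only
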
diff --git a/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu b/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
index efe75be8488b3c..0e5fe8fa35cf1e 100644
--- a/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
+++ b/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
@@ -157,7 +157,7 @@ __global__ void ffp3(long double *p) {
 
 __device__ double ffp4(double *p, float f) {
   // CHECK-LABEL: @_Z4ffp4Pdf
-  // CHECK: fpext float {{.*}} to double
+  // CHECK: fpext contract float {{.*}} to double
   // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8{{$}}
   // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
 
diff --git a/clang/test/CodeGenHIP/printf_nonhostcall.cpp b/clang/test/CodeGenHIP/printf_nonhostcall.cpp
index 2c6d0ecac1e8a9..1982eb864e2a4b 100644
--- a/clang/test/CodeGenHIP/printf_nonhostcall.cpp
+++ b/clang/test/CodeGenHIP/printf_nonhostcall.cpp
@@ -263,7 +263,7 @@ __device__ _BitInt(128) Int128 = 45637;
 // CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[TMP1]] to i32
 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @n to ptr), align 8
 // CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @f1 to ptr), align 4
-// CHECK-NEXT:    [[CONV1:%.*]] = fpext float [[TMP3]] to double
+// CHECK-NEXT:    [[CONV1:%.*]] = fpext contract float [[TMP3]] to double
 // CHECK-NEXT:    [[TMP4:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @f2 to ptr), align 8
 // CHECK-NEXT:    [[TMP5:%.*]] = load half, ptr addrspacecast (ptr addrspace(1) @f3 to ptr), align 2
 // CHECK-NEXT:    [[TMP6:%.*]] = load bfloat, ptr addrspacecast (ptr addrspace(1) @f4 to ptr), align 2
@@ -324,7 +324,7 @@ __device__ _BitInt(128) Int128 = 45637;
 // CHECK_CONSTRAINED-NEXT:    [[CONV:%.*]] = zext i16 [[TMP1]] to i32
 // CHECK_CONSTRAINED-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @n to ptr), align 8
 // CHECK_CONSTRAINED-NEXT:    [[TMP3:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @f1 to ptr), align 4
-// CHECK_CONSTRAINED-NEXT:    [[CONV1:%.*]] = fpext float [[TMP3]] to double
+// CHECK_CONSTRAINED-NEXT:    [[CONV1:%.*]] = fpext contract float [[TMP3]] to double
 // CHECK_CONSTRAINED-NEXT:    [[TMP4:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @f2 to ptr), align 8
 // CHECK_CONSTRAINED-NEXT:    [[TMP5:%.*]] = load half, ptr addrspacecast (ptr addrspace(1) @f3 to ptr), align 2
 // CHECK_CONSTRAINED-NEXT:    [[TMP6:%.*]] = load bfloat, ptr addrspacecast (ptr addrspace(1) @f4 to ptr), align 2
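
In the CUDA/HIP tests above only fused contraction is in effect, so the conversions carry just the contract flag rather than the full fast set. For example (a sketch):

    %conv = fpext contract float %f to double
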
diff --git a/clang/test/Headers/__clang_hip_math_ocml_rounded_ops.hip b/clang/test/Headers/__clang_hip_math_ocml_rounded_ops.hip
index 9e30c2c9c2ccbb..9bccb0f5716433 100644
--- a/clang/test/Headers/__clang_hip_math_ocml_rounded_ops.hip
+++ b/clang/test/Headers/__clang_hip_math_ocml_rounded_ops.hip
@@ -297,9 +297,9 @@ extern "C" __device__ double test___dmul_rz(double x, double y) {
 
 // CHECK-LABEL: @test___drcp_rd(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_div_rtn_f64(double noundef 1.000000e+00, double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___drcp_rd(float x) {
@@ -308,9 +308,9 @@ extern "C" __device__ float test___drcp_rd(float x) {
 
 // CHECK-LABEL: @test___drcp_rn(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_div_rte_f64(double noundef 1.000000e+00, double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___drcp_rn(float x) {
@@ -319,9 +319,9 @@ extern "C" __device__ float test___drcp_rn(float x) {
 
 // CHECK-LABEL: @test___drcp_ru(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_div_rtp_f64(double noundef 1.000000e+00, double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___drcp_ru(float x) {
@@ -330,9 +330,9 @@ extern "C" __device__ float test___drcp_ru(float x) {
 
 // CHECK-LABEL: @test___drcp_rz(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_div_rtz_f64(double noundef 1.000000e+00, double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___drcp_rz(float x) {
@@ -341,9 +341,9 @@ extern "C" __device__ float test___drcp_rz(float x) {
 
 // CHECK-LABEL: @test___dsqrt_rd(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_sqrt_rtn_f64(double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___dsqrt_rd(float x) {
@@ -352,9 +352,9 @@ extern "C" __device__ float test___dsqrt_rd(float x) {
 
 // CHECK-LABEL: @test___dsqrt_rn(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_sqrt_rte_f64(double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___dsqrt_rn(float x) {
@@ -363,9 +363,9 @@ extern "C" __device__ float test___dsqrt_rn(float x) {
 
 // CHECK-LABEL: @test___dsqrt_ru(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_sqrt_rtp_f64(double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___dsqrt_ru(float x) {
@@ -374,9 +374,9 @@ extern "C" __device__ float test___dsqrt_ru(float x) {
 
 // CHECK-LABEL: @test___dsqrt_rz(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CONV:%.*]] = fpext float [[X:%.*]] to double
+// CHECK-NEXT:    [[CONV:%.*]] = fpext contract float [[X:%.*]] to double
 // CHECK-NEXT:    [[CALL_I:%.*]] = tail call contract noundef double @__ocml_sqrt_rtz_f64(double noundef [[CONV]]) #[[ATTR3]]
-// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc double [[CALL_I]] to float
+// CHECK-NEXT:    [[CONV1:%.*]] = fptrunc contract double [[CALL_I]] to float
 // CHECK-NEXT:    ret float [[CONV1]]
 //
 extern "C" __device__ float test___dsqrt_rz(float x) {
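
Each of these wrappers follows the same promote/operate/demote idiom, and both conversions now carry the prevailing contract flag. The shape, reduced to its essentials (value names are illustrative):

    %conv  = fpext contract float %x to double
    %call  = tail call contract double @__ocml_sqrt_rte_f64(double %conv)
    %conv1 = fptrunc contract double %call to float
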
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index ef38c5ab33b926..87bc59d0e53879 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3730,10 +3730,10 @@ Fast-Math Flags
 
 LLVM IR floating-point operations (:ref:`fneg <i_fneg>`, :ref:`fadd <i_fadd>`,
 :ref:`fsub <i_fsub>`, :ref:`fmul <i_fmul>`, :ref:`fdiv <i_fdiv>`,
-:ref:`frem <i_frem>`, :ref:`fcmp <i_fcmp>`), and :ref:`phi <i_phi>`,
-:ref:`select <i_select>`, or :ref:`call <i_call>` instructions that return
-floating-point types may use the following flags to enable otherwise unsafe
-floating-point transformations.
+:ref:`frem <i_frem>`, :ref:`fcmp <i_fcmp>`, :ref:`fptrunc <i_fptrunc>`,
+:ref:`fpext <i_fpext>`), and :ref:`phi <i_phi>`, :ref:`select <i_select>`, or
+:ref:`call <i_call>` instructions that return floating-point types may use the
+following flags to enable otherwise unsafe floating-point transformations.
 
 ``fast``
    This flag is a shorthand for specifying all fast-math flags at once, and
@@ -11827,6 +11827,8 @@ Example:
       %Y = sext i1 true to i32             ; yields i32:-1
       %Z = sext <2 x i16> <i16 8, i16 7> to <2 x i32> ; yields <i32 8, i32 7>
 
+.. _i_fptrunc:
+
 '``fptrunc .. to``' Instruction
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -11875,6 +11877,8 @@ Example:
       %X = fptrunc double 16777217.0 to float    ; yields float:16777216.0
       %Y = fptrunc double 1.0E+300 to half       ; yields half:+infinity
 
+.. _i_fpext:
+
 '``fpext .. to``' Instruction
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index ec57b71f82ce9c..d36378c68eaa9e 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -100,6 +100,8 @@ Changes to the LLVM IR
 
 * Operand bundle values can now be metadata strings.
 
+* Fast-math flags are now permitted on `fptrunc` and `fpext`.
+
 Changes to LLVM infrastructure
 ------------------------------
 
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 23fd8350a29b3d..19e2b3571a6488 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2126,19 +2126,20 @@ class IRBuilderBase {
   }
 
   Value *CreateFPTrunc(Value *V, Type *DestTy,
-                       const Twine &Name = "") {
+                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
     if (IsFPConstrained)
       return CreateConstrainedFPCast(
           Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
-          Name);
-    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
+          Name, FPMathTag);
+    return CreateCast(Instruction::FPTrunc, V, DestTy, Name, FPMathTag);
   }
 
-  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
+  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "",
+                     MDNode *FPMathTag = nullptr) {
     if (IsFPConstrained)
       return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
-                                     V, DestTy, nullptr, Name);
-    return CreateCast(Instruction::FPExt, V, DestTy, Name);
+                                     V, DestTy, nullptr, Name, FPMathTag);
+    return CreateCast(Instruction::FPExt, V, DestTy, Name, FPMathTag);
   }
 
   Value *CreatePtrToInt(Value *V, Type *DestTy,
@@ -2186,12 +2187,15 @@ class IRBuilderBase {
   }
 
   Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
-                    const Twine &Name = "") {
+                    const Twine &Name = "", MDNode *FPMathTag = nullptr) {
     if (V->getType() == DestTy)
       return V;
     if (Value *Folded = Folder.FoldCast(Op, V, DestTy))
       return Folded;
-    return Insert(CastInst::Create(Op, V, DestTy), Name);
+    Instruction *Cast = CastInst::Create(Op, V, DestTy);
+    if (isa<FPMathOperator>(Cast))
+      setFPAttrs(Cast, FPMathTag, FMF);
+    return Insert(Cast, Name);
   }
 
   Value *CreatePointerCast(Value *V, Type *DestTy,
@@ -2241,12 +2245,13 @@ class IRBuilderBase {
     return CreateBitCast(V, DestTy, Name);
   }
 
-  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
+  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "",
+                      MDNode *FPMathTag = nullptr) {
     Instruction::CastOps CastOp =
         V->getType()->getScalarSizeInBits() > DestTy->getScalarSizeInBits()
             ? Instruction::FPTrunc
             : Instruction::FPExt;
-    return CreateCast(CastOp, V, DestTy, Name);
+    return CreateCast(CastOp, V, DestTy, Name, FPMathTag);
   }
 
   CallInst *CreateConstrainedFPCast(
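
With these overloads in place, any fast-math flags currently set on the builder (e.g. via setFastMathFlags) are stamped onto the casts it creates, the same way they already are for arithmetic. A builder whose flags are set to fast would therefore emit, for example (a sketch):

    %conv = fpext fast float %x to double
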
diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
index 93bdcc5bb02911..4d77860be994e6 100644
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -365,6 +365,8 @@ class FPMathOperator : public Operator {
     case Instruction::FMul:
     case Instruction::FDiv:
     case Instruction::FRem:
+    case Instruction::FPTrunc:
+    case Instruction::FPExt:
     // FIXME: To clean up and correct the semantics of fast-math-flags, FCmp
     //        should not be treated as a math op, but the other opcodes should.
     //        This would make things consistent with Select/PHI (FP value type
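
Making the two casts FPMathOperators is what lets generic code query and set flags on them uniformly; combined with the parser and LangRef changes, textual IR such as the following becomes valid (any subset of the flags is accepted):

    %t = fptrunc nnan ninf double %d to float
    %e = fpext fast float %f to double
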
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index dd1baabc7e9ac4..29391bdb6aa9f5 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -7001,8 +7001,6 @@ int LLParser::parseInstruction(Instruction *&Inst, BasicBlock *BB,
     return false;
   }
   case lltok::kw_sext:
-  case lltok::kw_fptrunc:
-  case lltok::kw_fpext:
   case lltok::kw_bitcast:
   case lltok::kw_addrspacecast:
   case lltok::kw_sitofp:
@@ -7011,6 +7009,16 @@ int LLParser::parseInstruction(Instruction *&Inst, BasicBlock *BB,
   case lltok::kw_inttoptr:
   case lltok::kw_ptrtoint:
     return parseCast(Inst, PFS, KeywordVal);
+  case lltok::kw_fptrunc:
+  case lltok::kw_fpext: {
+    FastMathFlags FMF = EatFastMathFlagsIfPresent();
+    if (parseCast(Inst, PFS, KeywordVal))
+      return true;
+    if (FMF.any())
+      Inst->setFastMathFlags(FMF);
+    return false;
+  }
+
   // Other.
   case lltok::kw_select: {
     FastMathFlags FMF = EatFastMathFlagsIfPresent();
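
As with select, the parser consumes any flags between the opcode keyword and the source type, so both of these parse (taken from the assembler tests below):

    %h = fptrunc nnan float %x to half
    %g = fpext nnan float %x to double
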
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 3e82aa7188bd67..d0aa2f99fc7e38 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -5164,6 +5164,11 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
           if (Record[OpNum] & (1 << bitc::TIO_NO_SIGNED_WRAP))
             cast<TruncInst>(I)->setHasNoSignedWrap(true);
         }
+        if (isa<FPMathOperator>(I)) {
+          FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]);
+          if (FMF.any())
+            I->setFastMathFlags(FMF);
+        }
       }
 
       InstructionList.push_back(I);
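
On the reader side the decoded flags are applied only when the instruction really is an FPMathOperator, so existing bitcode without flags on these casts continues to load unchanged. The round-trip itself is exercised by the compatibility.ll additions below, e.g.:

    %f.fast = fpext fast float %op1 to double
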
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 6c2554ea73b7f8..bf7deb2d1e70a4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1856,10 +1856,8 @@ Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
   Value *X;
   Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
   if (Op && Op->hasOneUse()) {
-    // FIXME: The FMF should propagate from the fptrunc, not the source op.
     IRBuilder<>::FastMathFlagGuard FMFG(Builder);
-    if (isa<FPMathOperator>(Op))
-      Builder.setFastMathFlags(Op->getFastMathFlags());
+    Builder.setFastMathFlags(FPT.getFastMathFlags());
 
     if (match(Op, m_FNeg(m_Value(X)))) {
       Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
@@ -1875,6 +1873,8 @@ Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
       // fptrunc (select Cond, (fpext X), Y --> select Cond, X, (fptrunc Y)
       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
       Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
+      if (auto *I = dyn_cast<Instruction>(Sel))
+        I->setFastMathFlags(Op->getFastMathFlags());
       return replaceInstUsesWith(FPT, Sel);
     }
     if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
@@ -1882,6 +1882,8 @@ Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
       // fptrunc (select Cond, Y, (fpext X) --> select Cond, (fptrunc Y), X
       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
       Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
+      if (auto *I = dyn_cast<Instruction>(Sel))
+        I->setFastMathFlags(Op->getFastMathFlags());
       return replaceInstUsesWith(FPT, Sel);
     }
   }
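
The behavioural change here: when visitFPTrunc narrows an operand through the builder, the replacement conversion now takes its flags from the fptrunc being visited rather than from the source operation, while a rewritten select keeps the select's own flags. Sketched on the fneg fold (illustrative names; the source fneg has no flags):

    ; before:        %neg = fneg double %x
    ;                %t   = fptrunc fast double %neg to float
    ; after the fold, the narrowed truncation carries %t's flags:
    %x.t = fptrunc fast double %x to float
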
diff --git a/llvm/test/Assembler/fast-math-flags.ll b/llvm/test/Assembler/fast-math-flags.ll
index 116f9305af8d30..992f47b33668ca 100644
--- a/llvm/test/Assembler/fast-math-flags.ll
+++ b/llvm/test/Assembler/fast-math-flags.ll
@@ -42,6 +42,14 @@ entry:
   %f = fneg float %x
 ; CHECK:  %f_vec = fneg <3 x float> %vec
   %f_vec = fneg <3 x float> %vec
+; CHECK: %g = fpext float %x to double
+  %g = fpext float %x to double
+; CHECK: %g_vec = fpext <3 x float> %vec to <3 x double>
+  %g_vec = fpext <3 x float> %vec to <3 x double>
+; CHECK: %h = fptrunc float %x to half
+  %h = fptrunc float %x to half
+; CHECK: %h_vec = fptrunc <3 x float> %vec to <3 x half>
+  %h_vec = fptrunc <3 x float> %vec to <3 x half>
 ; CHECK:  ret float %f
   ret  float %f
 }
@@ -80,6 +88,14 @@ entry:
   %f = fneg nnan float %x
 ; CHECK:  %f_vec = fneg nnan <3 x float> %vec
   %f_vec = fneg nnan <3 x float> %vec
+; CHECK: %g = fpext nnan float %x to double
+  %g = fpext nnan float %x to double
+; CHECK: %g_vec = fpext nnan <3 x float> %vec to <3 x double>
+  %g_vec = fpext nnan <3 x float> %vec to <3 x double>
+; CHECK: %h = fptrunc nnan float %x to half
+  %h = fptrunc nnan float %x to half
+; CHECK: %h_vec = fptrunc nnan <3 x float> %vec to <3 x half>
+  %h_vec = fptrunc nnan <3 x float> %vec to <3 x half>
 ; CHECK:  ret float %f
   ret float %f
 }
@@ -93,6 +109,10 @@ entry:
   %b = fadd contract float %x, %y
 ; CHECK: %c = fmul contract float %a, %b
   %c = fmul contract float %a, %b
+; CHECK: %d = fpext contract float %x to double
+  %d = fpext contract float %x to double
+; CHECK: %e = fptrunc contract float %x to half
+  %e = fptrunc contract float %x to half
   ret float %c
 }
 
@@ -104,6 +124,10 @@ define float @reassoc(float %x, float %y) {
   %b = fmul reassoc float %x, %y
 ; CHECK: %c = call reassoc float @foo(float %b)
   %c = call reassoc float @foo(float %b)
+; CHECK: %d = fpext reassoc float %x to double
+  %d = fpext reassoc float %x to double
+; CHECK: %e = fptrunc reassoc float %x to half
+  %e = fptrunc reassoc float %x to half
   ret float %c
 }
 
@@ -148,6 +172,14 @@ entry:
   %e = frem nnan float %x, %y
 ; CHECK:  %e_vec = frem nnan ninf <3 x float> %vec, %vec
   %e_vec = frem ninf nnan <3 x float> %vec, %vec
+; CHECK: %f = fpext nnan ninf float %x to double
+  %f = fpext ninf nnan float %x to double
+; CHECK: %f_vec = fpext nnan ninf <3 x float> %vec to <3 x double>
+  %f_vec = fpext ninf nnan <3 x float> %vec to <3 x double>
+; CHECK: %g = fptrunc nnan ninf float %x to half
+  %g = fptrunc ninf nnan float %x to half
+; CHECK: %g_vec = fptrunc nnan ninf <3 x float> %vec to <3 x half>
+  %g_vec = fptrunc ninf nnan <3 x float> %vec to <3 x half>
 ; CHECK:  ret float %e
   ret float %e
 }

>From a201f7644e3648bb7a73820b99ef76603eae4b34 Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Tue, 12 Nov 2024 17:09:02 +0000
Subject: [PATCH 2/3] Fix formatting.

---
 llvm/include/llvm/IR/IRBuilder.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 19e2b3571a6488..8cdfa27ece9378 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2125,12 +2125,12 @@ class IRBuilderBase {
     return CreateCast(Instruction::SIToFP, V, DestTy, Name);
   }
 
-  Value *CreateFPTrunc(Value *V, Type *DestTy,
-                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+  Value *CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name = "",
+                       MDNode *FPMathTag = nullptr) {
     if (IsFPConstrained)
       return CreateConstrainedFPCast(
-          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
-          Name, FPMathTag);
+          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr, Name,
+          FPMathTag);
     return CreateCast(Instruction::FPTrunc, V, DestTy, Name, FPMathTag);
   }
 

>From 8a4eccbe1257aaeabd70c672cfbf04823f5e3d86 Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Wed, 13 Nov 2024 11:52:45 +0000
Subject: [PATCH 3/3] Adjust based on review comments

Add fast-math flags to the fptrunc and fpext syntax in LangRef.
Add scalable vector tests.
Add a test in Bitcode/compatibility.ll.
---
 llvm/docs/LangRef.rst                  | 12 ++++++--
 llvm/test/Assembler/fast-math-flags.ll | 18 +++++++++++
 llvm/test/Bitcode/compatibility.ll     | 42 ++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 87bc59d0e53879..6b4e4a97ac880c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -11837,7 +11837,7 @@ Syntax:
 
 ::
 
-      <result> = fptrunc <ty> <value> to <ty2>             ; yields ty2
+      <result> = fptrunc [fast-math flags]* <ty> <value> to <ty2> ; yields ty2
 
 Overview:
 """""""""
@@ -11869,6 +11869,10 @@ the low order bits leads to an all-0 payload, this cannot be represented as a
 signaling NaN (it would represent an infinity instead), so in that case
 "Unchanged NaN propagation" is not possible.
 
+This instruction can also take any number of :ref:`fast-math
+flags <fastmath>`, which are optimization hints to enable otherwise
+unsafe floating-point optimizations.
+
 Example:
 """"""""
 
@@ -11887,7 +11891,7 @@ Syntax:
 
 ::
 
-      <result> = fpext <ty> <value> to <ty2>             ; yields ty2
+      <result> = fpext [fast-math flags]* <ty> <value> to <ty2> ; yields ty2
 
 Overview:
 """""""""
@@ -11916,6 +11920,10 @@ NaN payload is propagated from the input ("Quieting NaN propagation" or
 "Unchanged NaN propagation" cases), then it is copied to the high order bits of
 the resulting payload, and the remaining low order bits are zero.
 
+This instruction can also take any number of :ref:`fast-math
+flags <fastmath>`, which are optimization hints to enable otherwise
+unsafe floating-point optimizations.
+
 Example:
 """"""""
 
diff --git a/llvm/test/Assembler/fast-math-flags.ll b/llvm/test/Assembler/fast-math-flags.ll
index 992f47b33668ca..9c08e9da1d19ed 100644
--- a/llvm/test/Assembler/fast-math-flags.ll
+++ b/llvm/test/Assembler/fast-math-flags.ll
@@ -17,6 +17,8 @@ entry:
   %select = load i1, ptr @select
 ; CHECK:  %arr = load [3 x float], ptr @arr
   %arr    = load [3 x float], ptr @arr
+; CHECK:  %scalable = load <vscale x 3 x float>, ptr @vec
+  %scalable = load <vscale x 3 x float>, ptr @vec
 
 ; CHECK:  %a = fadd float %x, %y
   %a = fadd float %x, %y
@@ -46,10 +48,14 @@ entry:
   %g = fpext float %x to double
 ; CHECK: %g_vec = fpext <3 x float> %vec to <3 x double>
   %g_vec = fpext <3 x float> %vec to <3 x double>
+; CHECK: %g_scalable = fpext <vscale x 3 x float> %scalable to <vscale x 3 x double>
+  %g_scalable = fpext <vscale x 3 x float> %scalable to <vscale x 3 x double>
 ; CHECK: %h = fptrunc float %x to half
   %h = fptrunc float %x to half
 ; CHECK: %h_vec = fptrunc <3 x float> %vec to <3 x half>
   %h_vec = fptrunc <3 x float> %vec to <3 x half>
+; CHECK: %h_scalable = fptrunc <vscale x 3 x float> %scalable to <vscale x 3 x half>
+  %h_scalable = fptrunc <vscale x 3 x float> %scalable to <vscale x 3 x half>
 ; CHECK:  ret float %f
   ret  float %f
 }
@@ -63,6 +69,8 @@ entry:
   %select = load i1, ptr @select
 ; CHECK:  %arr = load [3 x float], ptr @arr
   %arr    = load [3 x float], ptr @arr
+; CHECK:  %scalable = load <vscale x 3 x float>, ptr @vec
+  %scalable = load <vscale x 3 x float>, ptr @vec
 
 ; CHECK:  %a = fadd nnan float %x, %y
   %a = fadd nnan float %x, %y
@@ -92,10 +100,14 @@ entry:
   %g = fpext nnan float %x to double
 ; CHECK: %g_vec = fpext nnan <3 x float> %vec to <3 x double>
   %g_vec = fpext nnan <3 x float> %vec to <3 x double>
+; CHECK: %g_scalable = fpext nnan <vscale x 3 x float> %scalable to <vscale x 3 x double>
+  %g_scalable = fpext nnan <vscale x 3 x float> %scalable to <vscale x 3 x double>
 ; CHECK: %h = fptrunc nnan float %x to half
   %h = fptrunc nnan float %x to half
 ; CHECK: %h_vec = fptrunc nnan <3 x float> %vec to <3 x half>
   %h_vec = fptrunc nnan <3 x float> %vec to <3 x half>
+; CHECK: %h_scalable = fptrunc nnan <vscale x 3 x float> %scalable to <vscale x 3 x half>
+  %h_scalable = fptrunc nnan <vscale x 3 x float> %scalable to <vscale x 3 x half>
 ; CHECK:  ret float %f
   ret float %f
 }
@@ -151,6 +163,8 @@ entry:
   %select = load i1, ptr @select
 ; CHECK:  %arr = load [3 x float], ptr @arr
   %arr    = load [3 x float], ptr @arr
+; CHECK:  %scalable = load <vscale x 3 x float>, ptr @vec
+  %scalable = load <vscale x 3 x float>, ptr @vec
 
 ; CHECK:  %a = fadd nnan ninf float %x, %y
   %a = fadd ninf nnan float %x, %y
@@ -176,10 +190,14 @@ entry:
   %f = fpext ninf nnan float %x to double
 ; CHECK: %f_vec = fpext nnan ninf <3 x float> %vec to <3 x double>
   %f_vec = fpext ninf nnan <3 x float> %vec to <3 x double>
+; CHECK: %f_scalable = fpext nnan ninf <vscale x 3 x float> %scalable to <vscale x 3 x double>
+  %f_scalable = fpext ninf nnan <vscale x 3 x float> %scalable to <vscale x 3 x double>
 ; CHECK: %g = fptrunc nnan ninf float %x to half
   %g = fptrunc ninf nnan float %x to half
 ; CHECK: %g_vec = fptrunc nnan ninf <3 x float> %vec to <3 x half>
   %g_vec = fptrunc ninf nnan <3 x float> %vec to <3 x half>
+; CHECK: %g_scalable = fptrunc nnan ninf <vscale x 3 x float> %scalable to <vscale x 3 x half>
+  %g_scalable = fptrunc ninf nnan <vscale x 3 x float> %scalable to <vscale x 3 x half>
 ; CHECK:  ret float %e
   ret float %e
 }
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
index a849789da536ac..a28156cdaa2797 100644
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -1142,6 +1142,48 @@ define void @fastMathFlagsForStructCalls() {
   ret void
 }
 
+; CHECK-LABEL: fastmathflags_fpext(
+define void @fastmathflags_fpext(float %op1) {
+  %f.nnan = fpext nnan float %op1 to double
+  ; CHECK: %f.nnan = fpext nnan float %op1 to double
+  %f.ninf = fpext ninf float %op1 to double
+  ; CHECK: %f.ninf = fpext ninf float %op1 to double
+  %f.nsz = fpext nsz float %op1 to double
+  ; CHECK: %f.nsz = fpext nsz float %op1 to double
+  %f.arcp = fpext arcp float %op1 to double
+  ; CHECK: %f.arcp = fpext arcp float %op1 to double
+  %f.contract = fpext contract float %op1 to double
+  ; CHECK: %f.contract = fpext contract float %op1 to double
+  %f.afn = fpext afn float %op1 to double
+  ; CHECK: %f.afn = fpext afn float %op1 to double
+  %f.reassoc = fpext reassoc float %op1 to double
+  ; CHECK: %f.reassoc = fpext reassoc float %op1 to double
+  %f.fast = fpext fast float %op1 to double
+  ; CHECK: %f.fast = fpext fast float %op1 to double
+  ret void
+}
+
+; CHECK-LABEL: fastmathflags_fptrunc(
+define void @fastmathflags_fptrunc(float %op1) {
+  %f.nnan = fptrunc nnan float %op1 to half
+  ; CHECK: %f.nnan = fptrunc nnan float %op1 to half
+  %f.ninf = fptrunc ninf float %op1 to half
+  ; CHECK: %f.ninf = fptrunc ninf float %op1 to half
+  %f.nsz = fptrunc nsz float %op1 to half
+  ; CHECK: %f.nsz = fptrunc nsz float %op1 to half
+  %f.arcp = fptrunc arcp float %op1 to half
+  ; CHECK: %f.arcp = fptrunc arcp float %op1 to half
+  %f.contract = fptrunc contract float %op1 to half
+  ; CHECK: %f.contract = fptrunc contract float %op1 to half
+  %f.afn = fptrunc afn float %op1 to half
+  ; CHECK: %f.afn = fptrunc afn float %op1 to half
+  %f.reassoc = fptrunc reassoc float %op1 to half
+  ; CHECK: %f.reassoc = fptrunc reassoc float %op1 to half
+  %f.fast = fptrunc fast float %op1 to half
+  ; CHECK: %f.fast = fptrunc fast float %op1 to half
+  ret void
+}
+
 ;; Type System
 %opaquety = type opaque
 define void @typesystem() {


