[llvm] WIP: [AMDGPU] Remove `UnsafeFPMath` (PR #151079)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 29 02:22:17 PDT 2025


https://github.com/paperchalice updated https://github.com/llvm/llvm-project/pull/151079

From 29001f0f4dd552ef154d66e29c04a425da5abdf5 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Tue, 29 Jul 2025 12:44:26 +0800
Subject: [PATCH 1/4] Remove `UnsafeFPMath` in `AMDGPUCodeGenPrepare`

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 15 +---
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll   | 16 ++--
 llvm/test/CodeGen/AMDGPU/rsq.f32.ll           | 90 +++++++++----------
 3 files changed, 57 insertions(+), 64 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 5f1983791cfae..7e2c67d22cf6e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -89,10 +89,6 @@ static cl::opt<bool> DisableFDivExpand(
   cl::ReallyHidden,
   cl::init(false));
 
-static bool hasUnsafeFPMath(const Function &F) {
-  return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
-}
-
 class AMDGPUCodeGenPrepareImpl
     : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
 public:
@@ -104,7 +100,6 @@ class AMDGPUCodeGenPrepareImpl
   const DominatorTree *DT;
   const UniformityInfo &UA;
   const DataLayout &DL;
-  const bool HasUnsafeFPMath;
   const bool HasFP32DenormalFlush;
   bool FlowChanged = false;
   mutable Function *SqrtF32 = nullptr;
@@ -117,7 +112,6 @@ class AMDGPUCodeGenPrepareImpl
                            const DominatorTree *DT, const UniformityInfo &UA)
       : F(F), ST(TM.getSubtarget<GCNSubtarget>(F)), TM(TM), TLI(TLI), AC(AC),
         DT(DT), UA(UA), DL(F.getDataLayout()),
-        HasUnsafeFPMath(hasUnsafeFPMath(F)),
         HasFP32DenormalFlush(SIModeRegisterDefaults(F, ST).FP32Denormals ==
                              DenormalMode::getPreserveSign()) {}
 
@@ -637,8 +631,7 @@ bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
     return false;
 
   // v_rsq_f32 gives 1ulp
-  return SqrtFMF.approxFunc() || HasUnsafeFPMath ||
-         SqrtOp->getFPAccuracy() >= 1.0f;
+  return SqrtFMF.approxFunc() || SqrtOp->getFPAccuracy() >= 1.0f;
 }
 
 Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
@@ -664,7 +657,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
     IRBuilder<>::FastMathFlagGuard Guard(Builder);
     Builder.setFastMathFlags(DivFMF | SqrtFMF);
 
-    if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) || HasUnsafeFPMath ||
+    if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
         canIgnoreDenormalInput(Den, CtxI)) {
       Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
       // -1.0 / sqrt(x) -> fneg(rsq(x))
@@ -852,7 +845,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
   // expansion of afn to codegen. The current interpretation is so aggressive we
   // don't need any pre-consideration here when we have better information. A
   // more conservative interpretation could use handling here.
-  const bool AllowInaccurateRcp = HasUnsafeFPMath || DivFMF.approxFunc();
+  const bool AllowInaccurateRcp = DivFMF.approxFunc();
   if (!RsqOp && AllowInaccurateRcp)
     return false;
 
@@ -2026,7 +2019,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
 
   // We're trying to handle the fast-but-not-that-fast case only. The lowering
   // of fast llvm.sqrt will give the raw instruction anyway.
-  if (SqrtFMF.approxFunc() || HasUnsafeFPMath)
+  if (SqrtFMF.approxFunc())
     return false;
 
   const float ReqdAccuracy = FPOp->getFPAccuracy();
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 425a8530afa97..477f0a610feec 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -51,7 +51,7 @@ define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(ptr addrspace(1) %out,
 ; SI-NOT: [[RESULT]]
 ; SI: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(ptr addrspace(1) %out, float %src) #4 {
-  %rcp = fdiv float 1.0, %src, !fpmath !0
+  %rcp = fdiv afn float 1.0, %src, !fpmath !0
   store float %rcp, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -105,8 +105,8 @@ define amdgpu_kernel void @safe_rsq_rcp_pat_amdgcn_sqrt_f32_nocontract(ptr addrs
 ; SI: v_sqrt_f32_e32
 ; SI: v_rcp_f32_e32
 define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(ptr addrspace(1) %out, float %src) #2 {
-  %sqrt = call float @llvm.sqrt.f32(float %src)
-  %rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
+  %sqrt = call afn float @llvm.sqrt.f32(float %src)
+  %rcp = call afn float @llvm.amdgcn.rcp.f32(float %sqrt)
   store float %rcp, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -148,7 +148,7 @@ define amdgpu_kernel void @rcp_pat_f64(ptr addrspace(1) %out, double %src) #1 {
 ; SI: v_fma_f64
 ; SI: v_fma_f64
 define amdgpu_kernel void @unsafe_rcp_pat_f64(ptr addrspace(1) %out, double %src) #2 {
-  %rcp = fdiv double 1.0, %src
+  %rcp = fdiv afn double 1.0, %src
   store double %rcp, ptr addrspace(1) %out, align 8
   ret void
 }
@@ -214,9 +214,9 @@ define amdgpu_kernel void @unsafe_amdgcn_sqrt_rsq_rcp_pat_f64(ptr addrspace(1) %
 }
 
 attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #3 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
 
 !0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
index f7e0388561104..f967e951b27a4 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s          | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee < %s          | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
 
 
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s          | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee < %s          | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
 
 
 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -65,8 +65,8 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) noalias %out, ptr addrspace(
 ; GCN-UNSAFE-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-UNSAFE-NEXT:    s_endpgm
   %val = load float, ptr addrspace(1) %in, align 4
-  %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
   store float %div, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -103,8 +103,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(ptr addrspace(1) noalias %out, float %va
 ; GCN-UNSAFE-NEXT:    s_mov_b32 s2, -1
 ; GCN-UNSAFE-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-UNSAFE-NEXT:    s_endpgm
-  %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
   store float %div, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -196,7 +196,7 @@ define amdgpu_kernel void @rsqrt_fmul(ptr addrspace(1) %out, ptr addrspace(1) %i
 
   %x = call contract float @llvm.sqrt.f32(float %a)
   %y = fmul contract float %x, %b
-  %z = fdiv arcp contract float %c, %y
+  %z = fdiv arcp afn contract float %c, %y
   store float %z, ptr addrspace(1) %out.gep
   ret void
 }
@@ -258,8 +258,8 @@ define amdgpu_kernel void @neg_rsq_f32(ptr addrspace(1) noalias %out, ptr addrsp
 ; GCN-UNSAFE-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-UNSAFE-NEXT:    s_endpgm
   %val = load float, ptr addrspace(1) %in, align 4
-  %sqrt = call contract float @llvm.sqrt.f32(float %val)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   store float %div, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -322,8 +322,8 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
 ; GCN-UNSAFE-NEXT:    s_endpgm
   %val = load float, ptr addrspace(1) %in, align 4
   %val.fneg = fneg float %val
-  %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   store float %div, ptr addrspace(1) %out, align 4
   ret void
 }
@@ -343,8 +343,8 @@ define float @v_neg_rsq_neg_f32(float %val) {
 ; GCN-IEEE-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
   %val.fneg = fneg float %val
-  %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   ret float %div
 }
 
@@ -367,8 +367,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32(<2 x float> %val) {
 ; GCN-IEEE-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
   %val.fneg = fneg <2 x float> %val
-  %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
-  %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+  %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
+  %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
   ret <2 x float> %div
 }
 
@@ -387,8 +387,8 @@ define float @v_neg_rsq_neg_f32_foldable_user(float %val0, float %val1) {
 ; GCN-IEEE-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
   %val0.neg = fneg float %val0
-  %sqrt = call contract float @llvm.sqrt.f32(float %val0.neg)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val0.neg)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   %user = fmul contract float %div, %val1
   ret float %user
 }
@@ -412,8 +412,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32_foldable_user(<2 x float> %val0, <2 x fl
 ; GCN-IEEE-NEXT:    v_mul_f32_e64 v1, -v1, v3
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
   %val0.fneg = fneg <2 x float> %val0
-  %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
-  %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+  %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
+  %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
   %user = fmul contract <2 x float> %div, %val1
   ret <2 x float> %user
 }
@@ -432,8 +432,8 @@ define float @v_neg_rsq_f32(float %val) {
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   ret float %div
 }
 
@@ -455,8 +455,8 @@ define <2 x float> @v_neg_rsq_v2f32(<2 x float> %val) {
 ; GCN-IEEE-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-IEEE-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
-  %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+  %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
+  %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
   ret <2 x float> %div
 }
 
@@ -474,8 +474,8 @@ define float @v_neg_rsq_f32_foldable_user(float %val0, float %val1) {
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val0)
-  %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val0)
+  %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
   %user = fmul contract float %div, %val1
   ret float %user
 }
@@ -643,8 +643,8 @@ define <2 x float> @v_neg_rsq_v2f32_foldable_user(<2 x float> %val0, <2 x float>
 ; CI-IEEE-SAFE-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; CI-IEEE-SAFE-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; CI-IEEE-SAFE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
-  %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+  %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
+  %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
   %user = fmul contract <2 x float> %div, %val1
   ret <2 x float> %user
 }
@@ -672,8 +672,8 @@ define float @v_rsq_f32(float %val) {
 ; GCN-IEEE-SAFE-NEXT:    v_cndmask_b32_e64 v1, 0, 12, vcc
 ; GCN-IEEE-SAFE-NEXT:    v_ldexp_f32_e32 v0, v0, v1
 ; GCN-IEEE-SAFE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
   ret float %div
 }
 
@@ -756,9 +756,9 @@ define { float, float } @v_rsq_f32_multi_use(float %val) {
 ; CI-IEEE-SAFE-NEXT:    v_sub_i32_e32 v2, vcc, 0, v2
 ; CI-IEEE-SAFE-NEXT:    v_ldexp_f32_e32 v1, v1, v2
 ; CI-IEEE-SAFE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
   %insert.0 = insertvalue { float, float } poison, float %sqrt, 0
-  %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+  %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
   %insert.1 = insertvalue { float, float } %insert.0, float %div, 1
   ret { float, float } %insert.1
 }
@@ -838,8 +838,8 @@ define float @v_rsq_f32_missing_contract0(float %val) {
 ; CI-IEEE-SAFE-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
 ; CI-IEEE-SAFE-NEXT:    v_ldexp_f32_e32 v0, v1, v0
 ; CI-IEEE-SAFE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call float @llvm.sqrt.f32(float %val), !fpmath !1
-  %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn float @llvm.sqrt.f32(float %val), !fpmath !1
+  %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
   ret float %div
 }
 
@@ -855,8 +855,8 @@ define float @v_rsq_f32_missing_contract1(float %val) {
 ; GCN-IEEE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
-  %div = fdiv arcp float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+  %div = fdiv arcp afn float 1.0, %sqrt, !fpmath !1
   ret float %div
 }
 
@@ -876,8 +876,8 @@ define float @v_rsq_f32_contractable_user(float %val0, float %val1) {
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
   %add = fadd contract float %div, %val1
   ret float %add
 }
@@ -897,8 +897,8 @@ define float @v_rsq_f32_contractable_user_missing_contract0(float %val0, float %
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
   %add = fadd contract float %div, %val1
   ret float %add
 }
@@ -918,8 +918,8 @@ define float @v_rsq_f32_contractable_user_missing_contract1(float %val0, float %
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
   %add = fadd float %div, %val1
   ret float %add
 }
@@ -953,8 +953,8 @@ define float @v_rsq_f32_known_never_posdenormal(float nofpclass(psub) %val) {
 ; GCN-IEEE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-IEEE-NEXT:    v_rsq_f32_e32 v0, v0
 ; GCN-IEEE-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
-  %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+  %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+  %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
   ret float %div
 }
 

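A note on the first patch: AMDGPUCodeGenPrepare now keys the rsq/rcp rewrites off the per-instruction `afn` fast-math flag (or the `!fpmath` accuracy metadata) instead of the function-wide "unsafe-fp-math" attribute, which is why the updated tests add `afn` to the sqrt/fdiv pairs. A minimal IR sketch of the shape those tests rely on (function and value names here are illustrative, not taken from the PR):

    define float @rsq_sketch(float %x) {
      ; afn + contract on both instructions is what the pass checks now,
      ; in place of the removed "unsafe-fp-math"="true" function attribute.
      %sqrt = call afn contract float @llvm.sqrt.f32(float %x)
      %rsq = fdiv afn contract float 1.0, %sqrt, !fpmath !0
      ret float %rsq
    }

    declare float @llvm.sqrt.f32(float)

    !0 = !{float 2.500000e+00}

With neither flag and a required accuracy tighter than 1 ulp, the pass is expected to keep the precise expansion, matching the remaining SqrtOp->getFPAccuracy() >= 1.0f check.
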
From f8bb469a294c7de4e989b0039c7f0f6f19851a98 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Tue, 29 Jul 2025 14:44:15 +0800
Subject: [PATCH 2/4] Remove `UnsafeFPMath` in `AMDGPUInstructions.td`

---
 llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 7a50923ffedc6..511fc6967da31 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -94,7 +94,6 @@ def NoFP32Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode()
 def NoFP64Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().FP64FP16Denormals == DenormalMode::getPreserveSign()">;
 def IEEEModeEnabled : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
 def IEEEModeDisabled : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
-def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
 }
 
 def FMA : Predicate<"Subtarget->hasFMA()">;

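The second patch removes the `UnsafeFPMath` selection predicate, so TableGen patterns can no longer key off the global -enable-unsafe-fp-math option; relaxed selection has to come from fast-math flags on the node itself. Roughly (a hypothetical reduced case, not from the PR), the flag travels from the IR instruction into the node's ApproximateFuncs bit:

    define half @fdiv_half_sketch(half %a, half %b) {
      ; afn is imported into SDNodeFlags during SelectionDAG build, so the
      ; relaxed lowering can still be requested per instruction after the
      ; predicate removal.
      %q = fdiv afn half %a, %b
      ret half %q
    }
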
From 226f3c4fa548b78c6cccd5cf594334976755e9b7 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Tue, 29 Jul 2025 16:15:16 +0800
Subject: [PATCH 3/4] Remove `UnsafeFPMath` in `*ISelLowering.cpp`

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |    6 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   14 +-
 llvm/test/CodeGen/AMDGPU/fdiv.f16.ll          |    4 +-
 llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll       | 1045 +++--------------
 llvm/test/CodeGen/AMDGPU/fptrunc.ll           |  633 ++++------
 llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll         |    5 +-
 llvm/test/CodeGen/AMDGPU/llvm.exp.ll          |    4 +-
 llvm/test/CodeGen/AMDGPU/llvm.exp10.ll        |    4 +-
 llvm/test/CodeGen/AMDGPU/llvm.log.ll          |    4 +-
 llvm/test/CodeGen/AMDGPU/llvm.log10.ll        |    4 +-
 llvm/test/CodeGen/AMDGPU/rcp-pattern.ll       |    8 +-
 llvm/test/CodeGen/AMDGPU/rsq.f64.ll           |   13 +-
 12 files changed, 368 insertions(+), 1376 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 61189337e5233..31c4f62d24dfe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2634,7 +2634,7 @@ bool AMDGPUTargetLowering::allowApproxFunc(const SelectionDAG &DAG,
   if (Flags.hasApproximateFuncs())
     return true;
   auto &Options = DAG.getTarget().Options;
-  return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+  return Options.ApproxFuncFPMath;
 }
 
 bool AMDGPUTargetLowering::needsDenormHandlingF32(const SelectionDAG &DAG,
@@ -2757,7 +2757,7 @@ SDValue AMDGPUTargetLowering::LowerFLOGCommon(SDValue Op,
 
   const auto &Options = getTargetMachine().Options;
   if (VT == MVT::f16 || Flags.hasApproximateFuncs() ||
-      Options.ApproxFuncFPMath || Options.UnsafeFPMath) {
+      Options.ApproxFuncFPMath) {
 
     if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
       // Log and multiply in f32 is good enough for f16.
@@ -3585,7 +3585,7 @@ SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) con
   if (N0.getValueType() == MVT::f32)
     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
 
-  if (getTargetMachine().Options.UnsafeFPMath) {
+  if (Op->getFlags().hasApproximateFuncs()) {
     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
     return SDValue();
   }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9017f4f26f835..1ae4a927ad9fe 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7148,7 +7148,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
       return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
     }
-    if (getTargetMachine().Options.UnsafeFPMath) {
+    if (Op->getFlags().hasApproximateFuncs()) {
       SDValue Flags = Op.getOperand(1);
       SDValue Src32 = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, Src, Flags);
       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Src32, Flags);
@@ -11243,8 +11243,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
   EVT VT = Op.getValueType();
   const SDNodeFlags Flags = Op->getFlags();
 
-  bool AllowInaccurateRcp =
-      Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+  bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
 
   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
     // Without !fpmath accuracy information, we can't do more because we don't
@@ -11263,7 +11262,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
 
       // 1.0 / sqrt(x) -> rsq(x)
 
-      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+      // XXX - Is afn sufficient to do this for f64? The maximum ULP
       // error seems really high at 2^29 ULP.
       // 1.0 / x -> rcp(x)
       return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
@@ -11297,8 +11296,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
   EVT VT = Op.getValueType();
   const SDNodeFlags Flags = Op->getFlags();
 
-  bool AllowInaccurateDiv =
-      Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+  bool AllowInaccurateDiv = Flags.hasApproximateFuncs();
   if (!AllowInaccurateDiv)
     return SDValue();
 
@@ -14550,7 +14548,7 @@ unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
     return ISD::FMAD;
 
   const TargetOptions &Options = DAG.getTarget().Options;
-  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+  if ((Options.AllowFPOpFusion == FPOpFusion::Fast ||
        (N0->getFlags().hasAllowContract() &&
         N1->getFlags().hasAllowContract())) &&
       isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
@@ -15675,7 +15673,7 @@ SDValue SITargetLowering::performFMACombine(SDNode *N,
   // regardless of the denorm mode setting. Therefore,
   // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
   const TargetOptions &Options = DAG.getTarget().Options;
-  if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+  if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
       (N->getFlags().hasAllowContract() &&
        FMA->getFlags().hasAllowContract())) {
     Op1 = Op1.getOperand(0);
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 9ae9d1977bd11..210e09fd9169a 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1702,7 +1702,7 @@ entry:
   %gep.r = getelementptr inbounds half, ptr addrspace(1) %r, i64 %tid.ext
   %a.val = load volatile half, ptr addrspace(1) %gep.a
   %b.val = load volatile half, ptr addrspace(1) %gep.b
-  %r.val = fdiv half %a.val, %b.val
+  %r.val = fdiv afn half %a.val, %b.val
   store half %r.val, ptr addrspace(1) %gep.r
   ret void
 }
@@ -2475,4 +2475,4 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half>) #2
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 57b4857776246..1d33c26686528 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -1101,62 +1101,21 @@ entry:
 define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ; SI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
 ; SI-SDAG:       ; %bb.0: ; %entry
-; SI-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT:    s_mov_b32 s2, -1
-; SI-SDAG-NEXT:    s_mov_b32 s10, s2
-; SI-SDAG-NEXT:    s_mov_b32 s11, s3
+; SI-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT:    s_mov_b32 s6, -1
+; SI-SDAG-NEXT:    s_mov_b32 s10, s6
+; SI-SDAG-NEXT:    s_mov_b32 s11, s7
 ; SI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT:    s_mov_b32 s8, s6
-; SI-SDAG-NEXT:    s_mov_b32 s9, s7
+; SI-SDAG-NEXT:    s_mov_b32 s8, s2
+; SI-SDAG-NEXT:    s_mov_b32 s9, s3
 ; SI-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-SDAG-NEXT:    s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT:    s_mov_b32 s4, s0
+; SI-SDAG-NEXT:    s_mov_b32 s5, s1
 ; SI-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s1, v1
-; SI-SDAG-NEXT:    s_and_b32 s6, s1, 0x1ff
-; SI-SDAG-NEXT:    s_lshr_b32 s7, s1, 8
-; SI-SDAG-NEXT:    s_bfe_u32 s8, s1, 0xb0014
-; SI-SDAG-NEXT:    v_or_b32_e32 v0, s6, v0
-; SI-SDAG-NEXT:    s_and_b32 s6, s7, 0xffe
-; SI-SDAG-NEXT:    s_sub_i32 s7, 0x3f1, s8
-; SI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT:    v_med3_i32 v1, s7, 0, 13
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; SI-SDAG-NEXT:    s_or_b32 s7, s6, 0x1000
-; SI-SDAG-NEXT:    s_lshr_b32 s10, s7, s9
-; SI-SDAG-NEXT:    s_lshl_b32 s9, s10, s9
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s9, s7
-; SI-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; SI-SDAG-NEXT:    s_addk_i32 s8, 0xfc10
-; SI-SDAG-NEXT:    s_or_b32 s7, s10, s7
-; SI-SDAG-NEXT:    s_lshl_b32 s9, s8, 12
-; SI-SDAG-NEXT:    s_or_b32 s9, s6, s9
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s8, 1
-; SI-SDAG-NEXT:    s_cselect_b32 s7, s7, s9
-; SI-SDAG-NEXT:    s_and_b32 s9, s7, 7
-; SI-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT:    s_lshr_b32 s7, s7, 2
-; SI-SDAG-NEXT:    s_or_b32 s9, s9, s10
-; SI-SDAG-NEXT:    s_add_i32 s7, s7, s9
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s8, 31
-; SI-SDAG-NEXT:    s_cselect_b32 s7, s7, 0x7c00
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s6, 0
-; SI-SDAG-NEXT:    s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT:    s_cmpk_eq_i32 s8, 0x40f
-; SI-SDAG-NEXT:    s_cselect_b32 s0, s0, s7
-; SI-SDAG-NEXT:    s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT:    s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT:    s_or_b32 s6, s1, s0
-; SI-SDAG-NEXT:    s_mov_b32 s0, s4
-; SI-SDAG-NEXT:    s_mov_b32 s1, s5
-; SI-SDAG-NEXT:    v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; SI-SDAG-NEXT:    s_endpgm
 ;
 ; SI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1174,62 +1133,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ;
 ; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
 ; VI-SDAG:       ; %bb.0: ; %entry
-; VI-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-SDAG-NEXT:    s_mov_b32 s10, s2
-; VI-SDAG-NEXT:    s_mov_b32 s11, s3
+; VI-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT:    s_mov_b32 s6, -1
+; VI-SDAG-NEXT:    s_mov_b32 s10, s6
+; VI-SDAG-NEXT:    s_mov_b32 s11, s7
 ; VI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT:    s_mov_b32 s8, s6
-; VI-SDAG-NEXT:    s_mov_b32 s9, s7
+; VI-SDAG-NEXT:    s_mov_b32 s8, s2
+; VI-SDAG-NEXT:    s_mov_b32 s9, s3
 ; VI-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; VI-SDAG-NEXT:    s_mov_b32 s0, s4
-; VI-SDAG-NEXT:    s_mov_b32 s1, s5
-; VI-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT:    s_mov_b32 s4, s0
+; VI-SDAG-NEXT:    s_mov_b32 s5, s1
 ; VI-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s4, v1
-; VI-SDAG-NEXT:    s_and_b32 s5, s4, 0x1ff
-; VI-SDAG-NEXT:    v_or_b32_e32 v0, s5, v0
-; VI-SDAG-NEXT:    s_lshr_b32 s7, s4, 8
-; VI-SDAG-NEXT:    s_bfe_u32 s8, s4, 0xb0014
-; VI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT:    s_and_b32 s5, s7, 0xffe
-; VI-SDAG-NEXT:    s_sub_i32 s7, 0x3f1, s8
-; VI-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT:    v_med3_i32 v1, s7, 0, 13
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT:    s_or_b32 s5, s5, s7
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT:    s_or_b32 s7, s5, 0x1000
-; VI-SDAG-NEXT:    s_lshr_b32 s10, s7, s9
-; VI-SDAG-NEXT:    s_lshl_b32 s9, s10, s9
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s9, s7
-; VI-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; VI-SDAG-NEXT:    s_addk_i32 s8, 0xfc10
-; VI-SDAG-NEXT:    s_lshl_b32 s9, s8, 12
-; VI-SDAG-NEXT:    s_or_b32 s7, s10, s7
-; VI-SDAG-NEXT:    s_or_b32 s9, s5, s9
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s8, 1
-; VI-SDAG-NEXT:    s_cselect_b32 s7, s7, s9
-; VI-SDAG-NEXT:    s_and_b32 s9, s7, 7
-; VI-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT:    s_lshr_b32 s7, s7, 2
-; VI-SDAG-NEXT:    s_or_b32 s9, s9, s10
-; VI-SDAG-NEXT:    s_add_i32 s7, s7, s9
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s8, 31
-; VI-SDAG-NEXT:    s_cselect_b32 s7, s7, 0x7c00
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s5, 0
-; VI-SDAG-NEXT:    s_cselect_b32 s5, s6, 0x7c00
-; VI-SDAG-NEXT:    s_cmpk_eq_i32 s8, 0x40f
-; VI-SDAG-NEXT:    s_cselect_b32 s5, s5, s7
-; VI-SDAG-NEXT:    s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT:    s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT:    s_or_b32 s4, s4, s5
-; VI-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; VI-SDAG-NEXT:    s_endpgm
 ;
 ; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1247,62 +1165,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ;
 ; GFX9-SDAG-LABEL: fptrunc_f64_to_f16_afn:
 ; GFX9-SDAG:       ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT:    s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT:    s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT:    s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT:    s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT:    s_mov_b32 s11, s7
 ; GFX9-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT:    s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT:    s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-SDAG-NEXT:    s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT:    s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT:    s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT:    s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX9-SDAG-NEXT:    s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT:    s_mov_b32 s5, s1
 ; GFX9-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s5, v1
-; GFX9-SDAG-NEXT:    s_and_b32 s6, s5, 0x1ff
-; GFX9-SDAG-NEXT:    v_or_b32_e32 v0, s6, v0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s7, s5, 8
-; GFX9-SDAG-NEXT:    s_bfe_u32 s8, s5, 0xb0014
-; GFX9-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT:    s_and_b32 s6, s7, 0xffe
-; GFX9-SDAG-NEXT:    s_sub_i32 s7, 0x3f1, s8
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT:    v_med3_i32 v1, s7, 0, 13
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT:    s_or_b32 s7, s6, 0x1000
-; GFX9-SDAG-NEXT:    s_lshr_b32 s10, s7, s9
-; GFX9-SDAG-NEXT:    s_lshl_b32 s9, s10, s9
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s9, s7
-; GFX9-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX9-SDAG-NEXT:    s_addk_i32 s8, 0xfc10
-; GFX9-SDAG-NEXT:    s_lshl_b32 s9, s8, 12
-; GFX9-SDAG-NEXT:    s_or_b32 s7, s10, s7
-; GFX9-SDAG-NEXT:    s_or_b32 s9, s6, s9
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s8, 1
-; GFX9-SDAG-NEXT:    s_cselect_b32 s7, s7, s9
-; GFX9-SDAG-NEXT:    s_and_b32 s9, s7, 7
-; GFX9-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s7, s7, 2
-; GFX9-SDAG-NEXT:    s_or_b32 s9, s9, s10
-; GFX9-SDAG-NEXT:    s_add_i32 s7, s7, s9
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s8, 31
-; GFX9-SDAG-NEXT:    s_cselect_b32 s7, s7, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX9-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmpk_eq_i32 s8, 0x40f
-; GFX9-SDAG-NEXT:    s_cselect_b32 s4, s4, s7
-; GFX9-SDAG-NEXT:    s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT:    s_or_b32 s4, s5, s4
-; GFX9-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX9-SDAG-NEXT:    s_endpgm
 ;
 ; GFX9-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1320,62 +1197,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ;
 ; GFX950-SDAG-LABEL: fptrunc_f64_to_f16_afn:
 ; GFX950-SDAG:       ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT:    s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT:    s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT:    s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT:    s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT:    s_mov_b32 s11, s7
 ; GFX950-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT:    s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT:    s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX950-SDAG-NEXT:    s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT:    s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT:    s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT:    s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX950-SDAG-NEXT:    s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT:    s_mov_b32 s5, s1
 ; GFX950-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s5, v1
-; GFX950-SDAG-NEXT:    s_and_b32 s6, s5, 0x1ff
-; GFX950-SDAG-NEXT:    v_or_b32_e32 v0, s6, v0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s7, s5, 8
-; GFX950-SDAG-NEXT:    s_bfe_u32 s8, s5, 0xb0014
-; GFX950-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT:    s_and_b32 s6, s7, 0xffe
-; GFX950-SDAG-NEXT:    s_sub_i32 s7, 0x3f1, s8
-; GFX950-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT:    v_med3_i32 v1, s7, 0, 13
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT:    s_or_b32 s7, s6, 0x1000
-; GFX950-SDAG-NEXT:    s_lshr_b32 s10, s7, s9
-; GFX950-SDAG-NEXT:    s_lshl_b32 s9, s10, s9
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s9, s7
-; GFX950-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX950-SDAG-NEXT:    s_addk_i32 s8, 0xfc10
-; GFX950-SDAG-NEXT:    s_lshl_b32 s9, s8, 12
-; GFX950-SDAG-NEXT:    s_or_b32 s7, s10, s7
-; GFX950-SDAG-NEXT:    s_or_b32 s9, s6, s9
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s8, 1
-; GFX950-SDAG-NEXT:    s_cselect_b32 s7, s7, s9
-; GFX950-SDAG-NEXT:    s_and_b32 s9, s7, 7
-; GFX950-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s7, s7, 2
-; GFX950-SDAG-NEXT:    s_or_b32 s9, s9, s10
-; GFX950-SDAG-NEXT:    s_add_i32 s7, s7, s9
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s8, 31
-; GFX950-SDAG-NEXT:    s_cselect_b32 s7, s7, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX950-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmpk_eq_i32 s8, 0x40f
-; GFX950-SDAG-NEXT:    s_cselect_b32 s4, s4, s7
-; GFX950-SDAG-NEXT:    s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT:    s_or_b32 s4, s5, s4
-; GFX950-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX950-SDAG-NEXT:    s_endpgm
 ;
 ; GFX950-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1401,60 +1237,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s8, s2
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s9, s3
-; GFX11-SDAG-TRUE16-NEXT:    buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT:    v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-TRUE16-NEXT:    s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT:    s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT:    v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT:    s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s2, s2, 16
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-TRUE16-NEXT:    buffer_load_b64 v[0:1], off, s[8:11], 0
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f16_f32_e32 v0.l, v0
 ; GFX11-SDAG-TRUE16-NEXT:    buffer_store_b16 v0, off, s[4:7], 0
 ; GFX11-SDAG-TRUE16-NEXT:    s_endpgm
 ;
@@ -1468,60 +1257,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
 ; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s8, s2
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s9, s3
-; GFX11-SDAG-FAKE16-NEXT:    buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT:    v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-FAKE16-NEXT:    s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT:    s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT:    v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT:    s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s2, s2, 16
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-FAKE16-NEXT:    buffer_load_b64 v[0:1], off, s[8:11], 0
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX11-SDAG-FAKE16-NEXT:    buffer_store_b16 v0, off, s[4:7], 0
 ; GFX11-SDAG-FAKE16-NEXT:    s_endpgm
 ;
@@ -3026,106 +2768,25 @@ entry:
 define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ; SI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
 ; SI-SDAG:       ; %bb.0: ; %entry
-; SI-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT:    s_mov_b32 s2, -1
-; SI-SDAG-NEXT:    s_mov_b32 s10, s2
-; SI-SDAG-NEXT:    s_mov_b32 s11, s3
+; SI-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT:    s_mov_b32 s6, -1
+; SI-SDAG-NEXT:    s_mov_b32 s10, s6
+; SI-SDAG-NEXT:    s_mov_b32 s11, s7
 ; SI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT:    s_mov_b32 s8, s6
-; SI-SDAG-NEXT:    s_mov_b32 s9, s7
+; SI-SDAG-NEXT:    s_mov_b32 s8, s2
+; SI-SDAG-NEXT:    s_mov_b32 s9, s3
 ; SI-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; SI-SDAG-NEXT:    s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT:    s_mov_b32 s4, s0
+; SI-SDAG-NEXT:    s_mov_b32 s5, s1
 ; SI-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; SI-SDAG-NEXT:    s_and_b32 s7, s1, 0x1ff
-; SI-SDAG-NEXT:    s_lshr_b32 s8, s1, 8
-; SI-SDAG-NEXT:    s_bfe_u32 s9, s1, 0xb0014
-; SI-SDAG-NEXT:    v_or_b32_e32 v1, s7, v2
-; SI-SDAG-NEXT:    s_and_b32 s7, s8, 0xffe
-; SI-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s9
-; SI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; SI-SDAG-NEXT:    v_med3_i32 v2, s8, 0, 13
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s8, v1
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
-; SI-SDAG-NEXT:    s_or_b32 s7, s7, s8
-; SI-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT:    s_lshr_b32 s11, s8, s10
-; SI-SDAG-NEXT:    s_lshl_b32 s10, s11, s10
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s10, s8
-; SI-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT:    s_addk_i32 s9, 0xfc10
-; SI-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT:    s_lshl_b32 s10, s9, 12
-; SI-SDAG-NEXT:    s_or_b32 s10, s7, s10
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s9, 1
-; SI-SDAG-NEXT:    s_cselect_b32 s8, s8, s10
-; SI-SDAG-NEXT:    s_and_b32 s10, s8, 7
-; SI-SDAG-NEXT:    s_cmp_gt_i32 s10, 5
-; SI-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT:    s_cmp_eq_u32 s10, 3
-; SI-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT:    s_or_b32 s10, s10, s11
-; SI-SDAG-NEXT:    s_add_i32 s8, s8, s10
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s9, 31
-; SI-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT:    s_cselect_b32 s7, s0, 0x7c00
-; SI-SDAG-NEXT:    s_cmpk_eq_i32 s9, 0x40f
-; SI-SDAG-NEXT:    s_cselect_b32 s7, s7, s8
-; SI-SDAG-NEXT:    s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT:    s_and_b32 s8, s6, 0x1ff
-; SI-SDAG-NEXT:    s_lshr_b32 s9, s6, 8
-; SI-SDAG-NEXT:    s_bfe_u32 s10, s6, 0xb0014
-; SI-SDAG-NEXT:    s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT:    v_or_b32_e32 v0, s8, v0
-; SI-SDAG-NEXT:    s_and_b32 s8, s9, 0xffe
-; SI-SDAG-NEXT:    s_sub_i32 s9, 0x3f1, s10
-; SI-SDAG-NEXT:    s_or_b32 s1, s1, s7
-; SI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT:    v_med3_i32 v1, s9, 0, 13
-; SI-SDAG-NEXT:    s_lshl_b32 s1, s1, 16
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT:    s_or_b32 s7, s8, s7
-; SI-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT:    s_lshr_b32 s11, s8, s9
-; SI-SDAG-NEXT:    s_lshl_b32 s9, s11, s9
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s9, s8
-; SI-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT:    s_addk_i32 s10, 0xfc10
-; SI-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT:    s_lshl_b32 s9, s10, 12
-; SI-SDAG-NEXT:    s_or_b32 s9, s7, s9
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s10, 1
-; SI-SDAG-NEXT:    s_cselect_b32 s8, s8, s9
-; SI-SDAG-NEXT:    s_and_b32 s9, s8, 7
-; SI-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT:    s_or_b32 s9, s9, s11
-; SI-SDAG-NEXT:    s_add_i32 s8, s8, s9
-; SI-SDAG-NEXT:    s_cmp_lt_i32 s10, 31
-; SI-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT:    s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT:    s_cmpk_eq_i32 s10, 0x40f
-; SI-SDAG-NEXT:    s_cselect_b32 s0, s0, s8
-; SI-SDAG-NEXT:    s_lshr_b32 s6, s6, 16
-; SI-SDAG-NEXT:    s_and_b32 s6, s6, 0x8000
-; SI-SDAG-NEXT:    s_or_b32 s0, s6, s0
-; SI-SDAG-NEXT:    s_and_b32 s0, s0, 0xffff
-; SI-SDAG-NEXT:    s_or_b32 s6, s0, s1
-; SI-SDAG-NEXT:    s_mov_b32 s0, s4
-; SI-SDAG-NEXT:    s_mov_b32 s1, s5
-; SI-SDAG-NEXT:    v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-SDAG-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; SI-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; SI-SDAG-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; SI-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-SDAG-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-SDAG-NEXT:    s_endpgm
 ;
 ; SI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3147,106 +2808,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ;
 ; VI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
 ; VI-SDAG:       ; %bb.0: ; %entry
-; VI-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-SDAG-NEXT:    s_mov_b32 s10, s2
-; VI-SDAG-NEXT:    s_mov_b32 s11, s3
+; VI-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT:    s_mov_b32 s6, -1
+; VI-SDAG-NEXT:    s_mov_b32 s10, s6
+; VI-SDAG-NEXT:    s_mov_b32 s11, s7
 ; VI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT:    s_mov_b32 s8, s6
-; VI-SDAG-NEXT:    s_mov_b32 s9, s7
+; VI-SDAG-NEXT:    s_mov_b32 s8, s2
+; VI-SDAG-NEXT:    s_mov_b32 s9, s3
 ; VI-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; VI-SDAG-NEXT:    s_mov_b32 s0, s4
-; VI-SDAG-NEXT:    s_mov_b32 s1, s5
-; VI-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT:    s_mov_b32 s4, s0
+; VI-SDAG-NEXT:    s_mov_b32 s5, s1
 ; VI-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
-; VI-SDAG-NEXT:    s_and_b32 s7, s4, 0x1ff
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s5, v1
-; VI-SDAG-NEXT:    v_or_b32_e32 v1, s7, v2
-; VI-SDAG-NEXT:    s_lshr_b32 s8, s4, 8
-; VI-SDAG-NEXT:    s_bfe_u32 s9, s4, 0xb0014
-; VI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-SDAG-NEXT:    s_and_b32 s7, s8, 0xffe
-; VI-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s9
-; VI-SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; VI-SDAG-NEXT:    v_med3_i32 v2, s8, 0, 13
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s8, v1
-; VI-SDAG-NEXT:    s_or_b32 s7, s7, s8
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
-; VI-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT:    s_lshr_b32 s11, s8, s10
-; VI-SDAG-NEXT:    s_lshl_b32 s10, s11, s10
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s10, s8
-; VI-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT:    s_addk_i32 s9, 0xfc10
-; VI-SDAG-NEXT:    s_lshl_b32 s10, s9, 12
-; VI-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT:    s_or_b32 s10, s7, s10
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s9, 1
-; VI-SDAG-NEXT:    s_cselect_b32 s8, s8, s10
-; VI-SDAG-NEXT:    s_and_b32 s10, s8, 7
-; VI-SDAG-NEXT:    s_cmp_gt_i32 s10, 5
-; VI-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT:    s_cmp_eq_u32 s10, 3
-; VI-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT:    s_or_b32 s10, s10, s11
-; VI-SDAG-NEXT:    s_add_i32 s8, s8, s10
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s9, 31
-; VI-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT:    s_cselect_b32 s7, s6, 0x7c00
-; VI-SDAG-NEXT:    s_cmpk_eq_i32 s9, 0x40f
-; VI-SDAG-NEXT:    s_cselect_b32 s7, s7, s8
-; VI-SDAG-NEXT:    s_and_b32 s8, s5, 0x1ff
-; VI-SDAG-NEXT:    v_or_b32_e32 v0, s8, v0
-; VI-SDAG-NEXT:    s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT:    s_lshr_b32 s9, s5, 8
-; VI-SDAG-NEXT:    s_bfe_u32 s10, s5, 0xb0014
-; VI-SDAG-NEXT:    s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT:    s_and_b32 s8, s9, 0xffe
-; VI-SDAG-NEXT:    s_sub_i32 s9, 0x3f1, s10
-; VI-SDAG-NEXT:    s_or_b32 s4, s4, s7
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT:    v_med3_i32 v1, s9, 0, 13
-; VI-SDAG-NEXT:    s_or_b32 s7, s8, s7
-; VI-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT:    s_lshr_b32 s11, s8, s9
-; VI-SDAG-NEXT:    s_lshl_b32 s4, s4, 16
-; VI-SDAG-NEXT:    s_lshl_b32 s9, s11, s9
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s9, s8
-; VI-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT:    s_addk_i32 s10, 0xfc10
-; VI-SDAG-NEXT:    s_lshl_b32 s9, s10, 12
-; VI-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT:    s_or_b32 s9, s7, s9
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s10, 1
-; VI-SDAG-NEXT:    s_cselect_b32 s8, s8, s9
-; VI-SDAG-NEXT:    s_and_b32 s9, s8, 7
-; VI-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT:    s_or_b32 s9, s9, s11
-; VI-SDAG-NEXT:    s_add_i32 s8, s8, s9
-; VI-SDAG-NEXT:    s_cmp_lt_i32 s10, 31
-; VI-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT:    s_cselect_b32 s6, s6, 0x7c00
-; VI-SDAG-NEXT:    s_cmpk_eq_i32 s10, 0x40f
-; VI-SDAG-NEXT:    s_cselect_b32 s6, s6, s8
-; VI-SDAG-NEXT:    s_lshr_b32 s5, s5, 16
-; VI-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; VI-SDAG-NEXT:    s_or_b32 s5, s5, s6
-; VI-SDAG-NEXT:    s_and_b32 s5, s5, 0xffff
-; VI-SDAG-NEXT:    s_or_b32 s4, s5, s4
-; VI-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-SDAG-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; VI-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT:    v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-SDAG-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-SDAG-NEXT:    s_endpgm
 ;
 ; VI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3267,104 +2846,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ;
 ; GFX9-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
 ; GFX9-SDAG:       ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT:    s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT:    s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT:    s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT:    s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT:    s_mov_b32 s11, s7
 ; GFX9-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT:    s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT:    s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX9-SDAG-NEXT:    s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT:    s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT:    s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT:    s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX9-SDAG-NEXT:    s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT:    s_mov_b32 s5, s1
 ; GFX9-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
-; GFX9-SDAG-NEXT:    s_and_b32 s7, s5, 0x1ff
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX9-SDAG-NEXT:    v_or_b32_e32 v1, s7, v2
-; GFX9-SDAG-NEXT:    s_lshr_b32 s8, s5, 8
-; GFX9-SDAG-NEXT:    s_bfe_u32 s9, s5, 0xb0014
-; GFX9-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-SDAG-NEXT:    s_and_b32 s7, s8, 0xffe
-; GFX9-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s9
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-SDAG-NEXT:    v_med3_i32 v2, s8, 0, 13
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX9-SDAG-NEXT:    s_or_b32 s7, s7, s8
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
-; GFX9-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT:    s_lshr_b32 s11, s8, s10
-; GFX9-SDAG-NEXT:    s_lshl_b32 s10, s11, s10
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s10, s8
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT:    s_addk_i32 s9, 0xfc10
-; GFX9-SDAG-NEXT:    s_lshl_b32 s10, s9, 12
-; GFX9-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT:    s_or_b32 s10, s7, s10
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s9, 1
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, s8, s10
-; GFX9-SDAG-NEXT:    s_and_b32 s10, s8, 7
-; GFX9-SDAG-NEXT:    s_cmp_gt_i32 s10, 5
-; GFX9-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT:    s_cmp_eq_u32 s10, 3
-; GFX9-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT:    s_or_b32 s10, s10, s11
-; GFX9-SDAG-NEXT:    s_add_i32 s8, s8, s10
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s9, 31
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT:    s_cselect_b32 s7, s4, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmpk_eq_i32 s9, 0x40f
-; GFX9-SDAG-NEXT:    s_cselect_b32 s7, s7, s8
-; GFX9-SDAG-NEXT:    s_and_b32 s8, s6, 0x1ff
-; GFX9-SDAG-NEXT:    v_or_b32_e32 v0, s8, v0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s9, s6, 8
-; GFX9-SDAG-NEXT:    s_bfe_u32 s10, s6, 0xb0014
-; GFX9-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT:    s_and_b32 s8, s9, 0xffe
-; GFX9-SDAG-NEXT:    s_sub_i32 s9, 0x3f1, s10
-; GFX9-SDAG-NEXT:    s_or_b32 s5, s5, s7
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT:    v_med3_i32 v1, s9, 0, 13
-; GFX9-SDAG-NEXT:    s_or_b32 s7, s8, s7
-; GFX9-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT:    s_lshr_b32 s11, s8, s9
-; GFX9-SDAG-NEXT:    s_lshl_b32 s9, s11, s9
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s9, s8
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT:    s_addk_i32 s10, 0xfc10
-; GFX9-SDAG-NEXT:    s_lshl_b32 s9, s10, 12
-; GFX9-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT:    s_or_b32 s9, s7, s9
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s10, 1
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, s8, s9
-; GFX9-SDAG-NEXT:    s_and_b32 s9, s8, 7
-; GFX9-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT:    s_or_b32 s9, s9, s11
-; GFX9-SDAG-NEXT:    s_add_i32 s8, s8, s9
-; GFX9-SDAG-NEXT:    s_cmp_lt_i32 s10, 31
-; GFX9-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT:    s_cmpk_eq_i32 s10, 0x40f
-; GFX9-SDAG-NEXT:    s_cselect_b32 s4, s4, s8
-; GFX9-SDAG-NEXT:    s_lshr_b32 s6, s6, 16
-; GFX9-SDAG-NEXT:    s_and_b32 s6, s6, 0x8000
-; GFX9-SDAG-NEXT:    s_or_b32 s4, s6, s4
-; GFX9-SDAG-NEXT:    s_pack_ll_b32_b16 s4, s4, s5
-; GFX9-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX9-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX9-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX9-SDAG-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX9-SDAG-NEXT:    s_endpgm
 ;
 ; GFX9-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3385,104 +2884,22 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ;
 ; GFX950-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
 ; GFX950-SDAG:       ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT:    s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT:    s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT:    s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT:    s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT:    s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT:    s_mov_b32 s11, s7
 ; GFX950-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT:    s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT:    s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX950-SDAG-NEXT:    s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT:    s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT:    s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT:    s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX950-SDAG-NEXT:    s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT:    s_mov_b32 s5, s1
 ; GFX950-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
-; GFX950-SDAG-NEXT:    s_and_b32 s7, s5, 0x1ff
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX950-SDAG-NEXT:    v_or_b32_e32 v1, s7, v2
-; GFX950-SDAG-NEXT:    s_lshr_b32 s8, s5, 8
-; GFX950-SDAG-NEXT:    s_bfe_u32 s9, s5, 0xb0014
-; GFX950-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX950-SDAG-NEXT:    s_and_b32 s7, s8, 0xffe
-; GFX950-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s9
-; GFX950-SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX950-SDAG-NEXT:    v_med3_i32 v2, s8, 0, 13
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX950-SDAG-NEXT:    s_or_b32 s7, s7, s8
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
-; GFX950-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT:    s_lshr_b32 s11, s8, s10
-; GFX950-SDAG-NEXT:    s_lshl_b32 s10, s11, s10
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s10, s8
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT:    s_addk_i32 s9, 0xfc10
-; GFX950-SDAG-NEXT:    s_lshl_b32 s10, s9, 12
-; GFX950-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT:    s_or_b32 s10, s7, s10
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s9, 1
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, s8, s10
-; GFX950-SDAG-NEXT:    s_and_b32 s10, s8, 7
-; GFX950-SDAG-NEXT:    s_cmp_gt_i32 s10, 5
-; GFX950-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT:    s_cmp_eq_u32 s10, 3
-; GFX950-SDAG-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT:    s_or_b32 s10, s10, s11
-; GFX950-SDAG-NEXT:    s_add_i32 s8, s8, s10
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s9, 31
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT:    s_cselect_b32 s7, s4, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmpk_eq_i32 s9, 0x40f
-; GFX950-SDAG-NEXT:    s_cselect_b32 s7, s7, s8
-; GFX950-SDAG-NEXT:    s_and_b32 s8, s6, 0x1ff
-; GFX950-SDAG-NEXT:    v_or_b32_e32 v0, s8, v0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s9, s6, 8
-; GFX950-SDAG-NEXT:    s_bfe_u32 s10, s6, 0xb0014
-; GFX950-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT:    s_and_b32 s8, s9, 0xffe
-; GFX950-SDAG-NEXT:    s_sub_i32 s9, 0x3f1, s10
-; GFX950-SDAG-NEXT:    s_or_b32 s5, s5, s7
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT:    v_med3_i32 v1, s9, 0, 13
-; GFX950-SDAG-NEXT:    s_or_b32 s7, s8, s7
-; GFX950-SDAG-NEXT:    v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT:    s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT:    s_lshr_b32 s11, s8, s9
-; GFX950-SDAG-NEXT:    s_lshl_b32 s9, s11, s9
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s9, s8
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT:    s_addk_i32 s10, 0xfc10
-; GFX950-SDAG-NEXT:    s_lshl_b32 s9, s10, 12
-; GFX950-SDAG-NEXT:    s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT:    s_or_b32 s9, s7, s9
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s10, 1
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, s8, s9
-; GFX950-SDAG-NEXT:    s_and_b32 s9, s8, 7
-; GFX950-SDAG-NEXT:    s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT:    s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT:    s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT:    s_or_b32 s9, s9, s11
-; GFX950-SDAG-NEXT:    s_add_i32 s8, s8, s9
-; GFX950-SDAG-NEXT:    s_cmp_lt_i32 s10, 31
-; GFX950-SDAG-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT:    s_cmpk_eq_i32 s10, 0x40f
-; GFX950-SDAG-NEXT:    s_cselect_b32 s4, s4, s8
-; GFX950-SDAG-NEXT:    s_lshr_b32 s6, s6, 16
-; GFX950-SDAG-NEXT:    s_and_b32 s6, s6, 0x8000
-; GFX950-SDAG-NEXT:    s_or_b32 s4, s6, s4
-; GFX950-SDAG-NEXT:    s_pack_ll_b32_b16 s4, s4, s5
-; GFX950-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX950-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT:    v_cvt_pk_f16_f32 v0, v0, v2
+; GFX950-SDAG-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX950-SDAG-NEXT:    s_endpgm
 ;
 ; GFX950-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3511,109 +2928,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s8, s2
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s9, s3
+; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s4, s0
 ; GFX11-SDAG-TRUE16-NEXT:    buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT:    v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-TRUE16-NEXT:    s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT:    s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-TRUE16-NEXT:    v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT:    s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-TRUE16-NEXT:    v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-TRUE16-NEXT:    s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-TRUE16-NEXT:    s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT:    v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s9, s10, s9
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s3, s12, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s10, s9, s10
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s10, s3, 7
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s10, s10, s11
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_add_i32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT:    s_cmpk_eq_i32 s5, 0x40f
 ; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT:    s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-TRUE16-NEXT:    s_or_b32 s3, s4, s3
-; GFX11-SDAG-TRUE16-NEXT:    s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT:    s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f32_f64_e32 v1, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f16_f32_e32 v0.l, v2
+; GFX11-SDAG-TRUE16-NEXT:    v_cvt_f16_f32_e32 v0.h, v1
+; GFX11-SDAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT:    v_pack_b32_f16 v0, v0.h, v0.l
 ; GFX11-SDAG-TRUE16-NEXT:    buffer_store_b32 v0, off, s[4:7], 0
 ; GFX11-SDAG-TRUE16-NEXT:    s_endpgm
 ;
@@ -3627,109 +2952,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
 ; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s8, s2
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s9, s3
+; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s4, s0
 ; GFX11-SDAG-FAKE16-NEXT:    buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT:    v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-FAKE16-NEXT:    s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT:    s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-FAKE16-NEXT:    v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT:    s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-FAKE16-NEXT:    v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-FAKE16-NEXT:    s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-FAKE16-NEXT:    s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT:    v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s9, s10, s9
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s3, s12, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s10, s9, s10
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s10, s3, 7
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s10, s10, s11
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_add_i32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT:    s_cmpk_eq_i32 s5, 0x40f
 ; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT:    s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-FAKE16-NEXT:    s_or_b32 s3, s4, s3
-; GFX11-SDAG-FAKE16-NEXT:    s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT:    s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX11-SDAG-FAKE16-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX11-SDAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX11-SDAG-FAKE16-NEXT:    buffer_store_b32 v0, off, s[4:7], 0
 ; GFX11-SDAG-FAKE16-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 4f8eab1c2fec0..5d311776066e5 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -226,59 +226,59 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
 ; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-SAFE-SDAG:       ; %bb.0:
-; VI-SAFE-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT:    s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT:    v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT:    s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT:    s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16:
+; VI-SDAG:       ; %bb.0:
+; VI-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
+; VI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT:    s_mov_b32 s2, -1
+; VI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT:    s_mov_b32 s0, s4
+; VI-SDAG-NEXT:    s_lshr_b32 s4, s7, 8
+; VI-SDAG-NEXT:    s_and_b32 s8, s4, 0xffe
+; VI-SDAG-NEXT:    s_and_b32 s4, s7, 0x1ff
+; VI-SDAG-NEXT:    s_or_b32 s4, s4, s6
+; VI-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT:    s_mov_b32 s1, s5
+; VI-SDAG-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; VI-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; VI-SDAG-NEXT:    v_readfirstlane_b32 s4, v0
+; VI-SDAG-NEXT:    s_bfe_u32 s6, s7, 0xb0014
+; VI-SDAG-NEXT:    s_or_b32 s4, s8, s4
+; VI-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s6
+; VI-SDAG-NEXT:    v_med3_i32 v0, s8, 0, 13
+; VI-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
+; VI-SDAG-NEXT:    v_readfirstlane_b32 s8, v0
+; VI-SDAG-NEXT:    s_lshr_b32 s9, s5, s8
+; VI-SDAG-NEXT:    s_lshl_b32 s8, s9, s8
+; VI-SDAG-NEXT:    s_cmp_lg_u32 s8, s5
+; VI-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
+; VI-SDAG-NEXT:    s_addk_i32 s6, 0xfc10
+; VI-SDAG-NEXT:    s_lshl_b32 s8, s6, 12
+; VI-SDAG-NEXT:    s_or_b32 s5, s9, s5
+; VI-SDAG-NEXT:    s_or_b32 s8, s4, s8
+; VI-SDAG-NEXT:    s_cmp_lt_i32 s6, 1
+; VI-SDAG-NEXT:    s_cselect_b32 s5, s5, s8
+; VI-SDAG-NEXT:    s_and_b32 s8, s5, 7
+; VI-SDAG-NEXT:    s_cmp_gt_i32 s8, 5
+; VI-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
+; VI-SDAG-NEXT:    s_cmp_eq_u32 s8, 3
+; VI-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
+; VI-SDAG-NEXT:    s_or_b32 s8, s8, s9
+; VI-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
+; VI-SDAG-NEXT:    s_add_i32 s5, s5, s8
+; VI-SDAG-NEXT:    s_cmp_lt_i32 s6, 31
+; VI-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
+; VI-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
+; VI-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
+; VI-SDAG-NEXT:    s_cmpk_eq_i32 s6, 0x40f
+; VI-SDAG-NEXT:    s_cselect_b32 s4, s4, s5
+; VI-SDAG-NEXT:    s_lshr_b32 s5, s7, 16
+; VI-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
+; VI-SDAG-NEXT:    s_or_b32 s4, s5, s4
+; VI-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; VI-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT:    s_endpgm
 ;
 ; VI-GISEL-LABEL: fptrunc_f64_to_f16:
 ; VI-GISEL:       ; %bb.0:
@@ -331,68 +331,57 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
 ; VI-GISEL-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; VI-GISEL-NEXT:    s_endpgm
 ;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-UNSAFE-SDAG:       ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT:    s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-SAFE-SDAG:       ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT:    s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX10-SDAG:       ; %bb.0:
+; GFX10-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
+; GFX10-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
+; GFX10-SDAG-NEXT:    s_or_b32 s2, s4, s2
+; GFX10-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
+; GFX10-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX10-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
+; GFX10-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
+; GFX10-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
+; GFX10-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX10-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX10-SDAG-NEXT:    s_or_b32 s4, s4, s5
+; GFX10-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
+; GFX10-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
+; GFX10-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
+; GFX10-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
+; GFX10-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX10-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
+; GFX10-SDAG-NEXT:    s_or_b32 s5, s7, s5
+; GFX10-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
+; GFX10-SDAG-NEXT:    s_or_b32 s6, s4, s6
+; GFX10-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
+; GFX10-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
+; GFX10-SDAG-NEXT:    s_and_b32 s6, s5, 7
+; GFX10-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
+; GFX10-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
+; GFX10-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
+; GFX10-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX10-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
+; GFX10-SDAG-NEXT:    s_or_b32 s6, s6, s7
+; GFX10-SDAG-NEXT:    s_add_i32 s5, s5, s6
+; GFX10-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
+; GFX10-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
+; GFX10-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
+; GFX10-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX10-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
+; GFX10-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
+; GFX10-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
+; GFX10-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
+; GFX10-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
+; GFX10-SDAG-NEXT:    s_or_b32 s2, s3, s2
+; GFX10-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10-SDAG-NEXT:    s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT:    s_endpgm
 ;
 ; GFX10-GISEL-LABEL: fptrunc_f64_to_f16:
 ; GFX10-GISEL:       ; %bb.0:
@@ -445,76 +434,65 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
 ; GFX10-GISEL-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX10-GISEL-NEXT:    s_endpgm
 ;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-UNSAFE-SDAG:       ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT:    s_endpgm
-;
-; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX11-SAFE-SDAG:       ; %bb.0:
-; GFX11-SAFE-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s2, s3, s2
-; GFX11-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX11-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX11-SAFE-SDAG-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-SAFE-SDAG-NEXT:    s_endpgm
+; GFX11-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX11-SDAG:       ; %bb.0:
+; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
+; GFX11-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
+; GFX11-SDAG-NEXT:    s_or_b32 s2, s4, s2
+; GFX11-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
+; GFX11-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
+; GFX11-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
+; GFX11-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX11-SDAG-NEXT:    s_or_b32 s4, s4, s5
+; GFX11-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
+; GFX11-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
+; GFX11-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
+; GFX11-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
+; GFX11-SDAG-NEXT:    s_or_b32 s5, s7, s5
+; GFX11-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
+; GFX11-SDAG-NEXT:    s_or_b32 s6, s4, s6
+; GFX11-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
+; GFX11-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    s_and_b32 s6, s5, 7
+; GFX11-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
+; GFX11-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
+; GFX11-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
+; GFX11-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX11-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
+; GFX11-SDAG-NEXT:    s_or_b32 s6, s6, s7
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    s_add_i32 s5, s5, s6
+; GFX11-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
+; GFX11-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
+; GFX11-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
+; GFX11-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX11-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
+; GFX11-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
+; GFX11-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
+; GFX11-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
+; GFX11-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
+; GFX11-SDAG-NEXT:    s_or_b32 s2, s3, s2
+; GFX11-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT:    s_mov_b32 s2, -1
+; GFX11-SDAG-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
+; GFX11-SDAG-NEXT:    s_endpgm
 ;
 ; GFX11-GISEL-LABEL: fptrunc_f64_to_f16:
 ; GFX11-GISEL:       ; %bb.0:
@@ -570,30 +548,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
 ; GFX11-GISEL-NEXT:    s_mov_b32 s2, -1
 ; GFX11-GISEL-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
 ; GFX11-GISEL-NEXT:    s_endpgm
-;
-; GFX11-UNSAFE-DAG-TRUE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-TRUE16:       ; %bb.0:
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    v_cvt_f16_f32_e32 v0.l, v0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT:    s_endpgm
-;
-; GFX11-UNSAFE-DAG-FAKE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-FAKE16:       ; %bb.0:
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT:    s_endpgm
   %result = fptrunc double %in to half
   %result_i16 = bitcast half %result to i16
   store i16 %result_i16, ptr addrspace(1) %out
@@ -603,111 +557,27 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
 define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double %in) {
 ; SI-LABEL: fptrunc_f64_to_f16_afn:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_movk_i32 s2, 0x7e00
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_lshr_b32 s0, s7, 8
-; SI-NEXT:    s_and_b32 s1, s7, 0x1ff
-; SI-NEXT:    s_and_b32 s8, s0, 0xffe
-; SI-NEXT:    s_or_b32 s0, s1, s6
-; SI-NEXT:    s_cmp_lg_u32 s0, 0
-; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT:    s_bfe_u32 s0, s7, 0xb0014
-; SI-NEXT:    v_readfirstlane_b32 s1, v0
-; SI-NEXT:    s_sub_i32 s6, 0x3f1, s0
-; SI-NEXT:    s_or_b32 s1, s8, s1
-; SI-NEXT:    v_med3_i32 v0, s6, 0, 13
-; SI-NEXT:    s_or_b32 s6, s1, 0x1000
-; SI-NEXT:    v_readfirstlane_b32 s8, v0
-; SI-NEXT:    s_lshr_b32 s9, s6, s8
-; SI-NEXT:    s_lshl_b32 s8, s9, s8
-; SI-NEXT:    s_cmp_lg_u32 s8, s6
-; SI-NEXT:    s_cselect_b32 s6, 1, 0
-; SI-NEXT:    s_addk_i32 s0, 0xfc10
-; SI-NEXT:    s_or_b32 s6, s9, s6
-; SI-NEXT:    s_lshl_b32 s8, s0, 12
-; SI-NEXT:    s_or_b32 s8, s1, s8
-; SI-NEXT:    s_cmp_lt_i32 s0, 1
-; SI-NEXT:    s_cselect_b32 s6, s6, s8
-; SI-NEXT:    s_and_b32 s8, s6, 7
-; SI-NEXT:    s_cmp_gt_i32 s8, 5
-; SI-NEXT:    s_cselect_b32 s9, 1, 0
-; SI-NEXT:    s_cmp_eq_u32 s8, 3
-; SI-NEXT:    s_cselect_b32 s8, 1, 0
-; SI-NEXT:    s_lshr_b32 s6, s6, 2
-; SI-NEXT:    s_or_b32 s8, s8, s9
-; SI-NEXT:    s_add_i32 s6, s6, s8
-; SI-NEXT:    s_cmp_lt_i32 s0, 31
-; SI-NEXT:    s_cselect_b32 s6, s6, 0x7c00
-; SI-NEXT:    s_cmp_lg_u32 s1, 0
-; SI-NEXT:    s_cselect_b32 s1, s2, 0x7c00
-; SI-NEXT:    s_cmpk_eq_i32 s0, 0x40f
-; SI-NEXT:    s_cselect_b32 s0, s1, s6
-; SI-NEXT:    s_lshr_b32 s1, s7, 16
-; SI-NEXT:    s_and_b32 s1, s1, 0x8000
-; SI-NEXT:    s_or_b32 s6, s1, s0
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s0, s4
-; SI-NEXT:    s_mov_b32 s1, s5
-; SI-NEXT:    v_mov_b32_e32 v0, s6
-; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-SAFE-SDAG:       ; %bb.0:
-; VI-SAFE-SDAG-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT:    s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT:    s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT:    s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT:    v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT:    s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT:    s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT:    s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT:    s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT:    s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; VI-SDAG:       ; %bb.0:
+; VI-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
+; VI-SDAG-NEXT:    s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT:    s_mov_b32 s2, -1
+; VI-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT:    s_endpgm
 ;
 ; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
 ; VI-GISEL:       ; %bb.0:
@@ -720,68 +590,16 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
 ; VI-GISEL-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; VI-GISEL-NEXT:    s_endpgm
 ;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-UNSAFE-SDAG:       ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT:    s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT:    s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-SAFE-SDAG:       ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT:    s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT:    s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; GFX10-SDAG:       ; %bb.0:
+; GFX10-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX10-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT:    s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX10-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT:    s_endpgm
 ;
 ; GFX10-GISEL-LABEL: fptrunc_f64_to_f16_afn:
 ; GFX10-GISEL:       ; %bb.0:
@@ -794,74 +612,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
 ; GFX10-GISEL-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX10-GISEL-NEXT:    s_endpgm
 ;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-UNSAFE-SDAG:       ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT:    s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT:    buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT:    s_endpgm
-;
 ; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
 ; GFX11-SAFE-SDAG:       ; %bb.0:
 ; GFX11-SAFE-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX11-SAFE-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT:    s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT:    s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT:    v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT:    s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT:    s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT:    s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT:    s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT:    s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT:    s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT:    s_or_b32 s2, s3, s2
+; GFX11-SAFE-SDAG-NEXT:    v_cvt_f32_f64_e32 v0, s[2:3]
 ; GFX11-SAFE-SDAG-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX11-SAFE-SDAG-NEXT:    s_mov_b32 s2, -1
+; GFX11-SAFE-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-SDAG-NEXT:    v_cvt_f16_f32_e32 v0.l, v0
 ; GFX11-SAFE-SDAG-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
 ; GFX11-SAFE-SDAG-NEXT:    s_endpgm
 ;
@@ -1833,4 +1592,8 @@ define amdgpu_kernel void @fptrunc_v8f64_to_v8f32_afn(ptr addrspace(1) %out, <8
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10-SAFE-GISEL: {{.*}}
+; GFX10-SAFE-SDAG: {{.*}}
+; GFX10-UNSAFE-SDAG: {{.*}}
 ; VI-SAFE-GISEL: {{.*}}
+; VI-SAFE-SDAG: {{.*}}
+; VI-UNSAFE-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 87c7cce854b11..f81950bde03e0 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -1294,13 +1294,13 @@ define float @v_sqrt_f32__enough_unsafe_attrs(float %x) #3 {
   ret float %result
 }
 
-define float @v_sqrt_f32__unsafe_attr(float %x) #4 {
+define float @v_sqrt_f32__unsafe_attr(float %x) {
 ; GCN-LABEL: v_sqrt_f32__unsafe_attr:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_sqrt_f32_e32 v0, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %result = call nsz float @llvm.sqrt.f32(float %x)
+  %result = call afn nsz float @llvm.sqrt.f32(float %x)
   ret float %result
 }
 
@@ -4763,7 +4763,6 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
 attributes #1 = { convergent nounwind willreturn memory(none) }
 attributes #2 = { "approx-func-fp-math"="true" }
 attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
 attributes #5 = { "no-infs-fp-math"="true" }
 
 !0 = !{float 0.5}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 8c1e166babaf8..0b80b10a211bb 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -3227,7 +3227,7 @@ define float @v_exp_f32_fast(float %in) {
   ret float %result
 }
 
-define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_exp_f32_unsafe_math_attr(float %in) {
 ; GCN-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
 ; GCN-SDAG:       ; %bb.0:
 ; GCN-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3289,7 +3289,7 @@ define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
 ; CM:       ; %bb.0:
 ; CM-NEXT:    CF_END
 ; CM-NEXT:    PAD
-  %result = call float @llvm.exp.f32(float %in)
+  %result = call afn float @llvm.exp.f32(float %in)
   ret float %result
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index edc505bdd6c1d..1d1901482f73c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -3235,7 +3235,7 @@ define float @v_exp10_f32_fast(float %in) {
   ret float %result
 }
 
-define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_exp10_f32_unsafe_math_attr(float %in) {
 ; GCN-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
 ; GCN-SDAG:       ; %bb.0:
 ; GCN-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3303,7 +3303,7 @@ define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
 ; CM:       ; %bb.0:
 ; CM-NEXT:    CF_END
 ; CM-NEXT:    PAD
-  %result = call float @llvm.exp10.f32(float %in)
+  %result = call afn float @llvm.exp10.f32(float %in)
   ret float %result
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index 38d1b4789cf45..e0091fd3783c2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -3076,7 +3076,7 @@ define float @v_log_f32_fast(float %in) {
   ret float %result
 }
 
-define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_log_f32_unsafe_math_attr(float %in) {
 ; SI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
 ; SI-SDAG:       ; %bb.0:
 ; SI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3187,7 +3187,7 @@ define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
 ; CM:       ; %bb.0:
 ; CM-NEXT:    CF_END
 ; CM-NEXT:    PAD
-  %result = call float @llvm.log.f32(float %in)
+  %result = call afn float @llvm.log.f32(float %in)
   ret float %result
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 058933f5481a0..b161aa3341b2f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -3076,7 +3076,7 @@ define float @v_log10_f32_fast(float %in) {
   ret float %result
 }
 
-define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_log10_f32_unsafe_math_attr(float %in) {
 ; SI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
 ; SI-SDAG:       ; %bb.0:
 ; SI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3187,7 +3187,7 @@ define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
 ; CM:       ; %bb.0:
 ; CM-NEXT:    CF_END
 ; CM-NEXT:    PAD
-  %result = call float @llvm.log10.f32(float %in)
+  %result = call afn float @llvm.log10.f32(float %in)
   ret float %result
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420ef0acb0..9f0ffbcf6eff9 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -56,7 +56,7 @@ define float @v_rcp_f32_ieee_unsafe(float %x) #4 {
 ; R600:       ; %bb.0:
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
-  %rcp = fdiv float 1.0, %x
+  %rcp = fdiv afn float 1.0, %x
   ret float %rcp
 }
 
@@ -1411,10 +1411,10 @@ define amdgpu_kernel void @s_div_arcp_neg_k_x_pat_f32_daz(ptr addrspace(1) %out)
 declare float @llvm.fabs.f32(float) #1
 declare float @llvm.sqrt.f32(float) #1
 
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
 attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
 attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
 
 !0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
index b78cbb0ac29cf..48bc56041729a 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
@@ -4504,7 +4504,7 @@ define <2 x double> @v_rsq_v2f64__afn_nnan_ninf(<2 x double> %x) {
   ret <2 x double> %rsq
 }
 
-define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
+define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) {
 ; SI-SDAG-LABEL: s_rsq_f64_unsafe:
 ; SI-SDAG:       ; %bb.0:
 ; SI-SDAG-NEXT:    v_mov_b32_e32 v0, 0
@@ -4648,8 +4648,8 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
 ; VI-GISEL-NEXT:    v_readfirstlane_b32 s0, v0
 ; VI-GISEL-NEXT:    v_readfirstlane_b32 s1, v1
 ; VI-GISEL-NEXT:    ; return to shader part epilog
-  %rsq = call contract double @llvm.sqrt.f64(double %x)
-  %result = fdiv contract double 1.0, %rsq
+  %rsq = call contract afn double @llvm.sqrt.f64(double %x)
+  %result = fdiv contract afn double 1.0, %rsq
   %cast = bitcast double %result to <2 x i32>
   %cast.0 = extractelement <2 x i32> %cast, i32 0
   %cast.1 = extractelement <2 x i32> %cast, i32 1
@@ -4660,7 +4660,7 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
   ret <2 x i32> %insert.1
 }
 
-define double @v_rsq_f64_unsafe(double %x) #0 {
+define double @v_rsq_f64_unsafe(double %x) {
 ; SI-SDAG-LABEL: v_rsq_f64_unsafe:
 ; SI-SDAG:       ; %bb.0:
 ; SI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4800,8 +4800,8 @@ define double @v_rsq_f64_unsafe(double %x) #0 {
 ; VI-GISEL-NEXT:    v_fma_f64 v[0:1], -v[0:1], v[2:3], 1.0
 ; VI-GISEL-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], v[2:3]
 ; VI-GISEL-NEXT:    s_setpc_b64 s[30:31]
-  %sqrt = call double @llvm.sqrt.f64(double %x)
-  %rsq = fdiv double 1.0, %sqrt
+  %sqrt = call afn double @llvm.sqrt.f64(double %x)
+  %rsq = fdiv afn double 1.0, %sqrt
   ret double %rsq
 }
 
@@ -5737,7 +5737,6 @@ define double @v_div_const_contract_sqrt_f64(double %x) {
   ret double %rsq
 }
 
-attributes #0 = { "unsafe-fp-math"="true" }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GCN: {{.*}}
 ; GISEL: {{.*}}

>From 88eea82863dcddd88cf5b1bf14e797925a4c87c8 Mon Sep 17 00:00:00 2001
From: PaperChalice <liujunchang97 at outlook.com>
Date: Tue, 29 Jul 2025 17:22:01 +0800
Subject: [PATCH 4/4] Remove `UnsafeFPMath` in `AMDGPULegalizerInfo.cpp`

---
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index fedfa3f9dd900..395dd46c36f62 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3326,7 +3326,7 @@ static bool allowApproxFunc(const MachineFunction &MF, unsigned Flags) {
   if (Flags & MachineInstr::FmAfn)
     return true;
   const auto &Options = MF.getTarget().Options;
-  return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+  return Options.ApproxFuncFPMath;
 }
 
 static bool needsDenormHandlingF32(const MachineFunction &MF, Register Src,
@@ -3432,7 +3432,7 @@ bool AMDGPULegalizerInfo::legalizeFlogCommon(MachineInstr &MI,
       static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
 
   if (Ty == F16 || MI.getFlag(MachineInstr::FmAfn) ||
-      TM.Options.ApproxFuncFPMath || TM.Options.UnsafeFPMath) {
+      TM.Options.ApproxFuncFPMath) {
     if (Ty == F16 && !ST.has16BitInsts()) {
       Register LogVal = MRI.createGenericVirtualRegister(F32);
       auto PromoteSrc = B.buildFPExt(F32, X);
@@ -4861,8 +4861,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
   LLT ResTy = MRI.getType(Res);
 
   const MachineFunction &MF = B.getMF();
-  bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn) ||
-                            MF.getTarget().Options.UnsafeFPMath;
+  bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
 
   if (const auto *CLHS = getConstantFPVRegVal(LHS, MRI)) {
     if (!AllowInaccurateRcp && ResTy != LLT::scalar(16))
@@ -4923,8 +4922,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV64(MachineInstr &MI,
   LLT ResTy = MRI.getType(Res);
 
   const MachineFunction &MF = B.getMF();
-  bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
-                            MI.getFlag(MachineInstr::FmAfn);
+  bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
 
   if (!AllowInaccurateRcp)
     return false;
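
As a reference for the test churn above, here is a minimal LLVM IR sketch (hypothetical function names, not taken from the patch) of the substitution the updated tests rely on: the per-instruction `afn` fast-math flag expresses the same relaxation that the function-wide "unsafe-fp-math" attribute used to imply, so the backend checks removed in this series are keyed off the flag alone.

; Before: relaxation came from the function-level attribute.
define float @rsq_unsafe_attr(float %x) "unsafe-fp-math"="true" {
  %sqrt = call float @llvm.sqrt.f32(float %x)
  %rsq = fdiv float 1.0, %sqrt
  ret float %rsq
}

; After: the same relaxation carried on each instruction via `afn`.
define float @rsq_afn_flags(float %x) {
  %sqrt = call afn float @llvm.sqrt.f32(float %x)
  %rsq = fdiv afn float 1.0, %sqrt
  ret float %rsq
}

declare float @llvm.sqrt.f32(float)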


