[llvm] 9c82dc6 - AMDGPU: Always use v_rcp_f16 and v_rsq_f16

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 5 13:53:09 PDT 2023


Author: Matt Arsenault
Date: 2023-07-05T16:53:01-04:00
New Revision: 9c82dc6a6ba1f3d75b5547680e0a8532684879c9

URL: https://github.com/llvm/llvm-project/commit/9c82dc6a6ba1f3d75b5547680e0a8532684879c9
DIFF: https://github.com/llvm/llvm-project/commit/9c82dc6a6ba1f3d75b5547680e0a8532684879c9.diff

LOG: AMDGPU: Always use v_rcp_f16 and v_rsq_f16

These inherited the fast math checks from f32, but the manual suggests
these should be accurate enough for unconditional use. The definition
of correctly rounded is 0.5ulp, but the manual says "0.51ulp". I've
been a bit nervous about changing this as the OpenCL conformance test
does not cover half. Brute force produces identical values compared to
a reference host implementation for all values.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f16.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
    llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
    llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 0bf87cf2edcbd0..65f1ed5f97a33d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -835,17 +835,12 @@ static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
 //
 // NOTE: rcp is the preference in cases that both are legal.
 bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
-
   Type *Ty = FDiv.getType()->getScalarType();
-
-  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
-  // expansion around them in codegen.
-  if (Ty->isDoubleTy())
+  if (!Ty->isFloatTy())
     return false;
 
-  // No intrinsic for fdiv16 if target does not support f16.
-  if (Ty->isHalfTy() && !ST->has16BitInsts())
-    return false;
+  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
+  // expansion around them in codegen. f16 is good enough to always use.
 
   const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
   const float ReqdAccuracy =  FPOp->getFPAccuracy();
@@ -854,11 +849,10 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
   FastMathFlags FMF = FPOp->getFastMathFlags();
   const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();
 
-  // rcp_f16 is accurate for !fpmath >= 1.0ulp.
+  // rcp_f16 is accurate to 0.51 ulp.
   // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
   // rcp_f64 is never accurate.
-  const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
-            (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);
+  const bool RcpIsAccurate = !HasFP32Denormals && ReqdAccuracy >= 1.0f;
 
   IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
   Builder.setFastMathFlags(FMF);

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 4edb6e5a4feac6..6266f337def3bb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -4246,13 +4246,20 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
   LLT ResTy = MRI.getType(Res);
 
   const MachineFunction &MF = B.getMF();
-  bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
-                            MI.getFlag(MachineInstr::FmAfn);
-
-  if (!AllowInaccurateRcp)
-    return false;
+  bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn) ||
+                            MF.getTarget().Options.UnsafeFPMath;
 
   if (auto CLHS = getConstantFPVRegVal(LHS, MRI)) {
+    if (!AllowInaccurateRcp && ResTy != LLT::scalar(16))
+      return false;
+
+    // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
+    // the CI documentation has a worst case error of 1 ulp.
+    // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
+    // use it as long as we aren't trying to use denormals.
+    //
+    // v_rcp_f16 and v_rsq_f16 DO support denormals and 0.51ulp.
+
     // 1 / x -> RCP(x)
     if (CLHS->isExactlyValue(1.0)) {
       B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
@@ -4263,6 +4270,8 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
       return true;
     }
 
+    // TODO: Match rsq
+
     // -1 / x -> RCP( FNEG(x) )
     if (CLHS->isExactlyValue(-1.0)) {
       auto FNeg = B.buildFNeg(ResTy, RHS, Flags);
@@ -4275,6 +4284,12 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
     }
   }
 
+  // For f16 require arcp only.
+  // For f32 require afn+arcp.
+  if (!AllowInaccurateRcp && (ResTy != LLT::scalar(16) ||
+                              !MI.getFlag(MachineInstr::FmArcp)))
+    return false;
+
   // x / y -> x * (1.0 / y)
   auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {ResTy}, false)
     .addUse(RHS)

diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 64b7f6a761917d..447d8a543d9915 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9130,26 +9130,30 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
   EVT VT = Op.getValueType();
   const SDNodeFlags Flags = Op->getFlags();
 
-  bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
-
-  // Without !fpmath accuracy information, we can't do more because we don't
-  // know exactly whether rcp is accurate enough to meet !fpmath requirement.
-  if (!AllowInaccurateRcp)
-    return SDValue();
+  bool AllowInaccurateRcp = Flags.hasApproximateFuncs() ||
+                            DAG.getTarget().Options.UnsafeFPMath;
 
   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
+    // Without !fpmath accuracy information, we can't do more because we don't
+    // know exactly whether rcp is accurate enough to meet !fpmath requirement.
+    // f16 is always accurate enough
+    if (!AllowInaccurateRcp && VT != MVT::f16)
+      return SDValue();
+
     if (CLHS->isExactlyValue(1.0)) {
       // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
       // the CI documentation has a worst case error of 1 ulp.
       // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
       // use it as long as we aren't trying to use denormals.
       //
-      // v_rcp_f16 and v_rsq_f16 DO support denormals.
+      // v_rcp_f16 and v_rsq_f16 DO support denormals and 0.51ulp.
 
       // 1.0 / sqrt(x) -> rsq(x)
 
       // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
       // error seems really high at 2^29 ULP.
+
+      // XXX - do we need afn for this or is arcp sufficient?
       if (RHS.getOpcode() == ISD::FSQRT)
         return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
 
@@ -9165,6 +9169,11 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
     }
   }
 
+  // For f16 require arcp only.
+  // For f32 require afn+arcp.
+  if (!AllowInaccurateRcp && (VT != MVT::f16 || !Flags.hasAllowReciprocal()))
+    return SDValue();
+
   // Turn into multiply by the reciprocal.
   // x / y -> x * (1.0 / y)
   SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f16.ll
index b5819f8214155d..7d90dfd46435cf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f16.ll
@@ -254,36 +254,14 @@ define half @v_neg_rcp_f16(half %x) {
 ; GFX89-LABEL: v_neg_rcp_f16:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX89-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_neg_rcp_f16:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_neg_rcp_f16:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_neg_rcp_f16:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_rcp_f16_e64 v0, -v0
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv half -1.0, %x
   ret half %fdiv
 }
@@ -333,36 +311,14 @@ define half @v_rcp_f16(half %x) {
 ; GFX89-LABEL: v_rcp_f16:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX89-NEXT:    v_rcp_f16_e32 v0, v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_rcp_f16:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_rcp_f16:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_rcp_f16:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_rcp_f16_e32 v0, v0
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv half 1.0, %x
   ret half %fdiv
 }
@@ -412,36 +368,14 @@ define half @v_rcp_f16_arcp(half %x) {
 ; GFX89-LABEL: v_rcp_f16_arcp:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX89-NEXT:    v_rcp_f16_e32 v0, v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_rcp_f16_arcp:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_rcp_f16_arcp:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_rcp_f16_arcp:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_rcp_f16_e32 v0, v0
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv arcp half 1.0, %x
   ret half %fdiv
 }
@@ -610,35 +544,23 @@ define half @v_fdiv_f16_arcp_ulp25(half %a, half %b) {
 ; GFX89-LABEL: v_fdiv_f16_arcp_ulp25:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, v1
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX89-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX89-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
+; GFX89-NEXT:    v_rcp_f16_e32 v1, v1
+; GFX89-NEXT:    v_mul_f16_e32 v0, v0, v1
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fdiv_f16_arcp_ulp25:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, v1
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX10-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX10-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
+; GFX10-NEXT:    v_rcp_f16_e32 v1, v1
+; GFX10-NEXT:    v_mul_f16_e32 v0, v0, v1
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fdiv_f16_arcp_ulp25:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, v1
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v2, v2
+; GFX11-NEXT:    v_rcp_f16_e32 v1, v1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX11-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
+; GFX11-NEXT:    v_mul_f16_e32 v0, v0, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv arcp half %a, %b, !fpmath !0
   ret half %fdiv
@@ -720,19 +642,19 @@ define <2 x half> @v_fdiv_v2f16(<2 x half> %a, <2 x half> %b) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v1
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v7, v6
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v6, v0
+; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v7, v2
 ; GFX8-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX8-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX8-NEXT:    v_mul_f32_e32 v3, v7, v5
+; GFX8-NEXT:    v_mul_f32_e32 v3, v6, v3
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
+; GFX8-NEXT:    v_mul_f32_e32 v5, v7, v5
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; GFX8-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v5, v4, v2
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -741,19 +663,19 @@ define <2 x half> @v_fdiv_v2f16(<2 x half> %a, <2 x half> %b) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v1
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v7, v6
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v6, v0
+; GFX9-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v7, v2
 ; GFX9-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX9-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX9-NEXT:    v_mul_f32_e32 v3, v7, v5
+; GFX9-NEXT:    v_mul_f32_e32 v3, v6, v3
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX9-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX9-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
+; GFX9-NEXT:    v_mul_f32_e32 v5, v7, v5
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; GFX9-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
+; GFX9-NEXT:    v_div_fixup_f16 v1, v5, v4, v2
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -938,19 +860,19 @@ define <2 x half> @v_fdiv_v2f16_ulp25(<2 x half> %a, <2 x half> %b) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v1
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v7, v6
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v6, v0
+; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v7, v2
 ; GFX8-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX8-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX8-NEXT:    v_mul_f32_e32 v3, v7, v5
+; GFX8-NEXT:    v_mul_f32_e32 v3, v6, v3
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
+; GFX8-NEXT:    v_mul_f32_e32 v5, v7, v5
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; GFX8-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v5, v4, v2
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -959,19 +881,19 @@ define <2 x half> @v_fdiv_v2f16_ulp25(<2 x half> %a, <2 x half> %b) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v1
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v7, v6
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v6, v0
+; GFX9-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v7, v2
 ; GFX9-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX9-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX9-NEXT:    v_mul_f32_e32 v3, v7, v5
+; GFX9-NEXT:    v_mul_f32_e32 v3, v6, v3
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX9-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX9-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
+; GFX9-NEXT:    v_mul_f32_e32 v5, v7, v5
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; GFX9-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
+; GFX9-NEXT:    v_div_fixup_f16 v1, v5, v4, v2
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1716,74 +1638,43 @@ define <2 x half> @v_rcp_v2f16_arcp(<2 x half> %x) {
 ; GFX8-LABEL: v_rcp_v2f16_arcp:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v2
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
-; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX8-NEXT:    v_mul_f32_e32 v1, v4, v1
-; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v2, 1.0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_rcp_f16_e32 v1, v0
+; GFX8-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0x3c00
+; GFX8-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX8-NEXT:    v_mul_f16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_rcp_v2f16_arcp:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
-; GFX9-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX9-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX9-NEXT:    v_mul_f32_e32 v1, v4, v1
-; GFX9-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX9-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX9-NEXT:    v_div_fixup_f16 v1, v3, v2, 1.0
-; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX9-NEXT:    v_rcp_f16_e32 v1, v0
+; GFX9-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX9-NEXT:    v_mul_f16_e32 v0, 1.0, v0
+; GFX9-NEXT:    v_pack_b32_f16 v0, v1, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_rcp_v2f16_arcp:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; GFX10-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX10-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX10-NEXT:    v_mul_f32_e32 v2, v4, v2
-; GFX10-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX10-NEXT:    v_div_fixup_f16 v0, v2, v0, 1.0
-; GFX10-NEXT:    v_div_fixup_f16 v1, v3, v1, 1.0
-; GFX10-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX10-NEXT:    v_rcp_f16_e32 v1, v0
+; GFX10-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX10-NEXT:    v_mul_f16_e32 v0, 1.0, v0
+; GFX10-NEXT:    v_pack_b32_f16 v0, v1, v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_rcp_v2f16_arcp:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; GFX11-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX11-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX11-NEXT:    v_rcp_f16_e32 v0, v0
+; GFX11-NEXT:    v_rcp_f16_e32 v1, v1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v2, v4, v2
-; GFX11-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX11-NEXT:    v_div_fixup_f16 v0, v2, v0, 1.0
-; GFX11-NEXT:    v_div_fixup_f16 v1, v3, v1, 1.0
+; GFX11-NEXT:    v_mul_f16_e32 v0, 1.0, v0
+; GFX11-NEXT:    v_mul_f16_e32 v1, 1.0, v1
 ; GFX11-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv arcp <2 x half> <half 1.0, half 1.0>, %x
@@ -1809,7 +1700,10 @@ define <2 x half> @v_rcp_v2f16_arcp_afn(<2 x half> %x) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_rcp_f16_e32 v1, v0
-; GFX8-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0x3c00
+; GFX8-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX8-NEXT:    v_mul_f16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1818,6 +1712,8 @@ define <2 x half> @v_rcp_v2f16_arcp_afn(<2 x half> %x) {
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_rcp_f16_e32 v1, v0
 ; GFX9-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX9-NEXT:    v_mul_f16_e32 v0, 1.0, v0
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v1, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1826,6 +1722,8 @@ define <2 x half> @v_rcp_v2f16_arcp_afn(<2 x half> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_rcp_f16_e32 v1, v0
 ; GFX10-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT:    v_mul_f16_e32 v1, 1.0, v1
+; GFX10-NEXT:    v_mul_f16_e32 v0, 1.0, v0
 ; GFX10-NEXT:    v_pack_b32_f16 v0, v1, v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1836,6 +1734,8 @@ define <2 x half> @v_rcp_v2f16_arcp_afn(<2 x half> %x) {
 ; GFX11-NEXT:    v_rcp_f16_e32 v0, v0
 ; GFX11-NEXT:    v_rcp_f16_e32 v1, v1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
+; GFX11-NEXT:    v_mul_f16_e32 v0, 1.0, v0
+; GFX11-NEXT:    v_mul_f16_e32 v1, 1.0, v1
 ; GFX11-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv arcp afn <2 x half> <half 1.0, half 1.0>, %x
@@ -1916,34 +1816,74 @@ define <2 x half> @v_rcp_v2f16_ulp25(<2 x half> %x) {
 ; GFX8-LABEL: v_rcp_v2f16_ulp25:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_rcp_f16_e32 v1, v0
-; GFX8-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v2
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
+; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX8-NEXT:    v_mul_f32_e32 v1, v4, v1
+; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX8-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v2, 1.0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_rcp_v2f16_ulp25:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_rcp_f16_e32 v1, v0
-; GFX9-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-NEXT:    v_pack_b32_f16 v0, v1, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v1, v0
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v2
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
+; GFX9-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX9-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX9-NEXT:    v_mul_f32_e32 v1, v4, v1
+; GFX9-NEXT:    v_mul_f32_e32 v3, v4, v3
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX9-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX9-NEXT:    v_div_fixup_f16 v1, v3, v2, 1.0
+; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_rcp_v2f16_ulp25:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_rcp_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    v_pack_b32_f16 v0, v1, v0
+; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX10-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
+; GFX10-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; GFX10-NEXT:    v_rcp_f32_e32 v2, v2
+; GFX10-NEXT:    v_rcp_f32_e32 v3, v3
+; GFX10-NEXT:    v_mul_f32_e32 v2, v4, v2
+; GFX10-NEXT:    v_mul_f32_e32 v3, v4, v3
+; GFX10-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX10-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX10-NEXT:    v_div_fixup_f16 v0, v2, v0, 1.0
+; GFX10-NEXT:    v_div_fixup_f16 v1, v3, v1, 1.0
+; GFX10-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_rcp_v2f16_ulp25:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GFX11-NEXT:    v_rcp_f16_e32 v0, v0
-; GFX11-NEXT:    v_rcp_f16_e32 v1, v1
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; GFX11-NEXT:    v_rcp_f32_e32 v2, v2
+; GFX11-NEXT:    v_rcp_f32_e32 v3, v3
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
+; GFX11-NEXT:    v_mul_f32_e32 v2, v4, v2
+; GFX11-NEXT:    v_mul_f32_e32 v3, v4, v3
+; GFX11-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX11-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX11-NEXT:    v_div_fixup_f16 v0, v2, v0, 1.0
+; GFX11-NEXT:    v_div_fixup_f16 v1, v3, v1, 1.0
 ; GFX11-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv <2 x half> <half 1.0, half 1.0>, %x, !fpmath !0
@@ -2087,81 +2027,43 @@ define <2 x half> @v_fdiv_v2f16_arcp_ulp25(<2 x half> %a, <2 x half> %b) {
 ; GFX8-LABEL: v_fdiv_v2f16_arcp_ulp25:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v1
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX8-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v7, v6
-; GFX8-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX8-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX8-NEXT:    v_mul_f32_e32 v3, v7, v5
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_rcp_f16_e32 v2, v1
+; GFX8-NEXT:    v_rcp_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_mul_f16_e32 v2, v0, v2
+; GFX8-NEXT:    v_mul_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_fdiv_v2f16_arcp_ulp25:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_lshrrev_b32_e32 v4, 16, v1
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, v1
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v5, v4
-; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v0
-; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v7, v6
-; GFX9-NEXT:    v_rcp_f32_e32 v5, v5
-; GFX9-NEXT:    v_mul_f32_e32 v2, v3, v2
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX9-NEXT:    v_mul_f32_e32 v3, v7, v5
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX9-NEXT:    v_div_fixup_f16 v0, v2, v1, v0
-; GFX9-NEXT:    v_div_fixup_f16 v1, v3, v4, v6
-; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX9-NEXT:    v_rcp_f16_e32 v2, v1
+; GFX9-NEXT:    v_rcp_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT:    v_mul_f16_e32 v2, v0, v2
+; GFX9-NEXT:    v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_pack_b32_f16 v0, v2, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fdiv_v2f16_arcp_ulp25:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v6, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v4, v2
-; GFX10-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v7, v5
-; GFX10-NEXT:    v_rcp_f32_e32 v4, v4
-; GFX10-NEXT:    v_mul_f32_e32 v3, v6, v3
-; GFX10-NEXT:    v_mul_f32_e32 v4, v7, v4
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v4, v4
-; GFX10-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
-; GFX10-NEXT:    v_div_fixup_f16 v1, v4, v2, v5
-; GFX10-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX10-NEXT:    v_rcp_f16_e32 v2, v1
+; GFX10-NEXT:    v_rcp_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT:    v_mul_f16_e32 v2, v0, v2
+; GFX10-NEXT:    v_mul_f16_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT:    v_pack_b32_f16 v0, v2, v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fdiv_v2f16_arcp_ulp25:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; GFX11-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v6, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v4, v2
-; GFX11-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v7, v5
-; GFX11-NEXT:    v_rcp_f32_e32 v4, v4
+; GFX11-NEXT:    v_rcp_f16_e32 v1, v1
+; GFX11-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-NEXT:    v_rcp_f16_e32 v2, v2
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_dual_mul_f32 v3, v6, v3 :: v_dual_mul_f32 v4, v7, v4
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v4, v4
-; GFX11-NEXT:    v_div_fixup_f16 v0, v3, v1, v0
-; GFX11-NEXT:    v_div_fixup_f16 v1, v4, v2, v5
+; GFX11-NEXT:    v_mul_f16_e32 v0, v0, v1
+; GFX11-NEXT:    v_mul_f16_e32 v1, v3, v2
 ; GFX11-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %fdiv = fdiv arcp <2 x half> %a, %b, !fpmath !0
@@ -2357,36 +2259,23 @@ define amdgpu_ps i16 @s_fdiv_f16_arcp(i16 inreg %a.arg, i16 inreg %b.arg) {
 ;
 ; GFX89-LABEL: s_fdiv_f16_arcp:
 ; GFX89:       ; %bb.0:
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, s0
-; GFX89-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX89-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX89-NEXT:    v_mov_b32_e32 v1, s1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v0, v1, s0
+; GFX89-NEXT:    v_rcp_f16_e32 v0, s1
+; GFX89-NEXT:    v_mul_f16_e32 v0, s0, v0
 ; GFX89-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX89-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: s_fdiv_f16_arcp:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, s0
-; GFX10-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX10-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX10-NEXT:    v_div_fixup_f16 v0, v0, s1, s0
+; GFX10-NEXT:    v_rcp_f16_e32 v0, s1
+; GFX10-NEXT:    v_mul_f16_e32 v0, s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: s_fdiv_f16_arcp:
 ; GFX11:       ; %bb.0:
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, s0
-; GFX11-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX11-NEXT:    v_rcp_f16_e32 v0, s1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-NEXT:    v_div_fixup_f16 v0, v0, s1, s0
+; GFX11-NEXT:    v_mul_f16_e32 v0, s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    ; return to shader part epilog
   %a = bitcast i16 %a.arg to half
@@ -2518,21 +2407,21 @@ define amdgpu_ps i32 @s_fdiv_v2f16(i32 inreg %a.arg, i32 inreg %b.arg) {
 ; GFX8-LABEL: s_fdiv_v2f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX8-NEXT:    s_lshr_b32 s2, s1, 16
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, s2
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, s0
+; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, s3
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, s0
 ; GFX8-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, s3
-; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX8-NEXT:    v_mul_f32_e32 v0, v1, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, s2
+; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX8-NEXT:    v_mul_f32_e32 v0, v2, v0
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX8-NEXT:    v_mul_f32_e32 v1, v3, v2
+; GFX8-NEXT:    v_mul_f32_e32 v1, v3, v1
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX8-NEXT:    v_div_fixup_f16 v0, v0, v2, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s2
-; GFX8-NEXT:    v_div_fixup_f16 v1, v1, v2, s3
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_div_fixup_f16 v1, v1, v2, s2
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
@@ -2541,21 +2430,21 @@ define amdgpu_ps i32 @s_fdiv_v2f16(i32 inreg %a.arg, i32 inreg %b.arg) {
 ; GFX9-LABEL: s_fdiv_v2f16:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX9-NEXT:    s_lshr_b32 s2, s1, 16
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, s2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v1, s0
+; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v1, s3
+; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, s0
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX9-NEXT:    s_lshr_b32 s3, s0, 16
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, s3
-; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX9-NEXT:    v_mul_f32_e32 v0, v1, v0
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, s2
+; GFX9-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX9-NEXT:    v_mul_f32_e32 v0, v2, v0
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX9-NEXT:    v_mul_f32_e32 v1, v3, v2
+; GFX9-NEXT:    v_mul_f32_e32 v1, v3, v1
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_div_fixup_f16 v0, v0, v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v2, s2
-; GFX9-NEXT:    v_div_fixup_f16 v1, v1, v2, s3
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    v_div_fixup_f16 v1, v1, v2, s2
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX9-NEXT:    ; return to shader part epilog
@@ -2650,35 +2539,20 @@ define amdgpu_ps i16 @s_rcp_f16(i16 inreg %a.arg) {
 ;
 ; GFX89-LABEL: s_rcp_f16:
 ; GFX89:       ; %bb.0:
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, 1.0
-; GFX89-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX89-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX89-NEXT:    v_div_fixup_f16 v0, v0, s0, 1.0
+; GFX89-NEXT:    v_rcp_f16_e32 v0, s0
 ; GFX89-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX89-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: s_rcp_f16:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, 1.0
-; GFX10-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX10-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX10-NEXT:    v_div_fixup_f16 v0, v0, s0, 1.0
+; GFX10-NEXT:    v_rcp_f16_e32 v0, s0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: s_rcp_f16:
 ; GFX11:       ; %bb.0:
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, 1.0
-; GFX11-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX11-NEXT:    v_rcp_f16_e32 v0, s0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-NEXT:    v_div_fixup_f16 v0, v0, s0, 1.0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    ; return to shader part epilog
   %a = bitcast i16 %a.arg to half
@@ -2731,35 +2605,20 @@ define amdgpu_ps i16 @s_neg_rcp_f16(i16 inreg %a.arg) {
 ;
 ; GFX89-LABEL: s_neg_rcp_f16:
 ; GFX89:       ; %bb.0:
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, -1.0
-; GFX89-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX89-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX89-NEXT:    v_div_fixup_f16 v0, v0, s0, -1.0
+; GFX89-NEXT:    v_rcp_f16_e64 v0, -s0
 ; GFX89-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX89-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: s_neg_rcp_f16:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, -1.0
-; GFX10-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX10-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX10-NEXT:    v_div_fixup_f16 v0, v0, s0, -1.0
+; GFX10-NEXT:    v_rcp_f16_e64 v0, -s0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: s_neg_rcp_f16:
 ; GFX11:       ; %bb.0:
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v0, s0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, -1.0
-; GFX11-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX11-NEXT:    v_rcp_f16_e64 v0, -s0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v0, v1, v0
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-NEXT:    v_div_fixup_f16 v0, v0, s0, -1.0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    ; return to shader part epilog
   %a = bitcast i16 %a.arg to half
@@ -2818,39 +2677,20 @@ define amdgpu_ps i16 @s_rsq_f16(i16 inreg %a.arg) {
 ;
 ; GFX89-LABEL: s_rsq_f16:
 ; GFX89:       ; %bb.0:
-; GFX89-NEXT:    v_sqrt_f16_e32 v0, s0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX89-NEXT:    v_rsq_f16_e32 v0, s0
 ; GFX89-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX89-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: s_rsq_f16:
 ; GFX10:       ; %bb.0:
-; GFX10-NEXT:    v_sqrt_f16_e32 v0, s0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX10-NEXT:    v_rsq_f16_e32 v0, s0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: s_rsq_f16:
 ; GFX11:       ; %bb.0:
-; GFX11-NEXT:    v_sqrt_f16_e32 v0, s0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
+; GFX11-NEXT:    v_rsq_f16_e32 v0, s0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    ; return to shader part epilog
   %a = bitcast i16 %a.arg to half
@@ -2952,23 +2792,20 @@ define amdgpu_ps i32 @s_rsq_v2f16(i32 inreg %a.arg) {
 ;
 ; GFX8-LABEL: s_rsq_v2f16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
-; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    v_sqrt_f16_e32 v0, s0
-; GFX8-NEXT:    v_sqrt_f16_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    s_lshr_b32 s0, s0, 16
+; GFX8-NEXT:    v_sqrt_f16_e32 v1, s0
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v4, -1.0
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v2
-; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
 ; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX8-NEXT:    v_mul_f32_e32 v1, v4, v1
+; GFX8-NEXT:    v_mul_f32_e32 v2, v4, v2
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v2, -1.0
+; GFX8-NEXT:    v_div_fixup_f16 v0, v2, v0, -1.0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v1, -1.0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_readfirstlane_b32 s0, v0
@@ -3093,40 +2930,14 @@ define half @v_rsq_f16(half %a) {
 ; GFX89-LABEL: v_rsq_f16:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX89-NEXT:    v_rsq_f16_e32 v0, v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_rsq_f16:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_rsq_f16:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_rsq_f16:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_rsq_f16_e32 v0, v0
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %sqrt = call half @llvm.sqrt.f16(half %a)
   %fdiv = fdiv half 1.0, %sqrt
   ret half %fdiv
@@ -3184,38 +2995,22 @@ define half @v_neg_rsq_f16(half %a) {
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX89-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX89-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_neg_rsq_f16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX10-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_neg_rsq_f16:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX11-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %sqrt = call half @llvm.sqrt.f16(half %a)
   %fdiv = fdiv half -1.0, %sqrt
@@ -3274,38 +3069,22 @@ define half @v_neg_rsq_f16_fabs(half %a) {
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX89-NEXT:    v_sqrt_f16_e64 v0, |v0|
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX89-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_neg_rsq_f16_fabs:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_sqrt_f16_e64 v0, |v0|
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX10-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_neg_rsq_f16_fabs:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_sqrt_f16_e64 v0, |v0|
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX11-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %a.fabs = call half @llvm.fabs.f16(half %a)
   %sqrt = call half @llvm.sqrt.f16(half %a.fabs)
@@ -3364,40 +3143,14 @@ define half @v_rsq_f16_arcp(half %a) {
 ; GFX89-LABEL: v_rsq_f16_arcp:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX89-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
+; GFX89-NEXT:    v_rsq_f16_e32 v0, v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_rsq_f16_arcp:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_rsq_f16_arcp:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, 1.0
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_rsq_f16_arcp:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_rsq_f16_e32 v0, v0
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %sqrt = call half @llvm.sqrt.f16(half %a)
   %fdiv = fdiv arcp half 1.0, %sqrt
   ret half %fdiv
@@ -3455,38 +3208,22 @@ define half @v_neg_rsq_f16_arcp(half %a) {
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX89-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX89-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX89-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX89-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX89-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX89-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX89-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX89-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_neg_rsq_f16_arcp:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX10-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX10-NEXT:    v_rcp_f32_e32 v1, v1
-; GFX10-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX10-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX10-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX10-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_neg_rsq_f16_arcp:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_sqrt_f16_e32 v0, v0
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, -1.0
-; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX11-NEXT:    v_rcp_f32_e32 v1, v1
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NEXT:    v_mul_f32_e32 v1, v2, v1
-; GFX11-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX11-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
+; GFX11-NEXT:    v_rcp_f16_e64 v0, -v0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %sqrt = call half @llvm.sqrt.f16(half %a)
   %fdiv = fdiv arcp half -1.0, %sqrt
@@ -3611,22 +3348,20 @@ define <2 x half> @v_rsq_v2f16(<2 x half> %a) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_sqrt_f16_e32 v1, v0
-; GFX8-NEXT:    v_sqrt_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v4, 1.0
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v2
-; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v0
+; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
 ; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX8-NEXT:    v_mul_f32_e32 v1, v4, v1
+; GFX8-NEXT:    v_mul_f32_e32 v2, v4, v2
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v1, v0, 1.0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v2, 1.0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX8-NEXT:    v_div_fixup_f16 v0, v3, v0, 1.0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v2, v1, 1.0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_rsq_v2f16:
@@ -3781,22 +3516,20 @@ define <2 x half> @v_neg_rsq_v2f16(<2 x half> %a) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_sqrt_f16_e32 v1, v0
-; GFX8-NEXT:    v_sqrt_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX8-NEXT:    v_sqrt_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_cvt_f32_f16_e32 v4, -1.0
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
-; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v1, v0
-; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v2
-; GFX8-NEXT:    v_rcp_f32_e32 v1, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v2, v1
+; GFX8-NEXT:    v_cvt_f32_f16_e32 v3, v0
+; GFX8-NEXT:    v_rcp_f32_e32 v2, v2
 ; GFX8-NEXT:    v_rcp_f32_e32 v3, v3
-; GFX8-NEXT:    v_mul_f32_e32 v1, v4, v1
+; GFX8-NEXT:    v_mul_f32_e32 v2, v4, v2
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; GFX8-NEXT:    v_div_fixup_f16 v0, v1, v0, -1.0
-; GFX8-NEXT:    v_div_fixup_f16 v1, v3, v2, -1.0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX8-NEXT:    v_div_fixup_f16 v0, v3, v0, -1.0
+; GFX8-NEXT:    v_div_fixup_f16 v1, v2, v1, -1.0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_neg_rsq_v2f16:

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
index da4bcf07336990..e774c2c83dfd8e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
@@ -2055,30 +2055,18 @@ body: |
     ; VI-LABEL: name: test_fdiv_s16_constant_one_rcp
     ; VI: liveins: $vgpr0
     ; VI-NEXT: {{  $}}
-    ; VI-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
     ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; VI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; VI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; VI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; VI-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+    ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX9-LABEL: name: test_fdiv_s16_constant_one_rcp
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
     ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX9-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; GFX9-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; GFX9-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; GFX9-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+    ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_one_rcp
     ; GFX9-UNSAFE: liveins: $vgpr0
@@ -2091,16 +2079,10 @@ body: |
     ; GFX10-LABEL: name: test_fdiv_s16_constant_one_rcp
     ; GFX10: liveins: $vgpr0
     ; GFX10-NEXT: {{  $}}
-    ; GFX10-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
     ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+    ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s16) = G_FCONSTANT half 1.0
     %1:_(s32) = COPY $vgpr0
@@ -2143,30 +2125,20 @@ body: |
     ; VI-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
     ; VI: liveins: $vgpr0
     ; VI-NEXT: {{  $}}
-    ; VI-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHBC00
     ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; VI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; VI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; VI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; VI-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; VI-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+    ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+    ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX9-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHBC00
     ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX9-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; GFX9-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; GFX9-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; GFX9-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; GFX9-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+    ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+    ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
     ; GFX9-UNSAFE: liveins: $vgpr0
@@ -2180,16 +2152,11 @@ body: |
     ; GFX10-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
     ; GFX10: liveins: $vgpr0
     ; GFX10-NEXT: {{  $}}
-    ; GFX10-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHBC00
     ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
-    ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
-    ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
-    ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
-    ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC]](s16), [[C]](s16)
-    ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+    ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+    ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+    ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
     ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s16) = G_FCONSTANT half -1.0
     %1:_(s32) = COPY $vgpr0

diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 95290aca2324a6..2778eee5b747bc 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -92,10 +92,7 @@ entry:
 
 ; GCN-LABEL: {{^}}reciprocal_f16_rounded:
 ; GFX8PLUS: {{flat|global}}_load_{{ushort|u16}} [[VAL16:v[0-9]+]], v{{.+}}
-; GFX8PLUS: v_cvt_f32_f16_e32 [[CVT_TO32:v[0-9]+]], [[VAL16]]
-; GFX8PLUS: v_rcp_f32_e32 [[RCP32:v[0-9]+]], [[CVT_TO32]]
-; GFX8PLUS: v_cvt_f16_f32_e32 [[CVT_BACK16:v[0-9]+]], [[RCP32]]
-; GFX8PLUS: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK16]], [[VAL16]], 1.0
+; GFX8PLUS: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL16]]
 ; GFX8PLUS: {{flat|global}}_store_{{short|b16}} v{{.+}}, [[RESULT]]
 define amdgpu_kernel void @reciprocal_f16_rounded(ptr addrspace(1) %r, ptr addrspace(1) %b) #0 {
 entry:
@@ -269,8 +266,8 @@ define amdgpu_kernel void @div_afn_neg_k_x_pat_f16(ptr addrspace(1) %out) #0 {
 ; SI: v_rcp_f32
 ; SI: v_mul_f32
 
-; GFX8PLUS: v_rcp_f32
-; GFX8PLUS: v_mul_f32
+; GFX8PLUS: v_rcp_f16
+; GFX8PLUS: v_mul_f16
 define half @v_fdiv_f16_arcp(half %x, half %y) {
   %fdiv = fdiv arcp half %x, %y
   ret half %fdiv

diff  --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
index e463e8e9781969..e324b27f3f4ba0 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
@@ -527,69 +527,54 @@ define amdgpu_ps half @fneg_fadd_0_f16(half inreg %tmp2, half inreg %tmp6, <4 x
 ;
 ; VI-SAFE-LABEL: fneg_fadd_0_f16:
 ; VI-SAFE:       ; %bb.0: ; %.entry
-; VI-SAFE-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; VI-SAFE-NEXT:    v_mov_b32_e32 v2, s0
-; VI-SAFE-NEXT:    v_mov_b32_e32 v1, 0x7e00
-; VI-SAFE-NEXT:    v_rcp_f32_e32 v0, v0
-; VI-SAFE-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; VI-SAFE-NEXT:    v_div_fixup_f16 v0, v0, s1, 1.0
+; VI-SAFE-NEXT:    v_rcp_f16_e32 v0, s1
+; VI-SAFE-NEXT:    v_mov_b32_e32 v1, s0
 ; VI-SAFE-NEXT:    v_mul_f16_e32 v0, 0, v0
 ; VI-SAFE-NEXT:    v_add_f16_e32 v0, 0, v0
-; VI-SAFE-NEXT:    v_xor_b32_e32 v3, 0x8000, v0
+; VI-SAFE-NEXT:    v_xor_b32_e32 v2, 0x8000, v0
 ; VI-SAFE-NEXT:    v_cmp_ngt_f16_e32 vcc, s0, v0
-; VI-SAFE-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; VI-SAFE-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-SAFE-NEXT:    v_mov_b32_e32 v1, 0x7e00
 ; VI-SAFE-NEXT:    v_cmp_nlt_f16_e32 vcc, 0, v0
 ; VI-SAFE-NEXT:    v_cndmask_b32_e64 v0, v1, 0, vcc
 ; VI-SAFE-NEXT:    ; return to shader part epilog
 ;
 ; VI-NSZ-LABEL: fneg_fadd_0_f16:
 ; VI-NSZ:       ; %bb.0: ; %.entry
-; VI-NSZ-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; VI-NSZ-NEXT:    v_mov_b32_e32 v2, s0
-; VI-NSZ-NEXT:    v_mov_b32_e32 v1, 0x7e00
-; VI-NSZ-NEXT:    v_rcp_f32_e32 v0, v0
-; VI-NSZ-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; VI-NSZ-NEXT:    v_div_fixup_f16 v0, v0, s1, 1.0
+; VI-NSZ-NEXT:    v_rcp_f16_e32 v0, s1
+; VI-NSZ-NEXT:    v_mov_b32_e32 v1, s0
 ; VI-NSZ-NEXT:    v_mul_f16_e32 v0, 0x8000, v0
 ; VI-NSZ-NEXT:    v_cmp_nlt_f16_e64 vcc, -v0, s0
-; VI-NSZ-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; VI-NSZ-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NSZ-NEXT:    v_mov_b32_e32 v1, 0x7e00
 ; VI-NSZ-NEXT:    v_cmp_nlt_f16_e32 vcc, 0, v0
 ; VI-NSZ-NEXT:    v_cndmask_b32_e64 v0, v1, 0, vcc
 ; VI-NSZ-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-SAFE-LABEL: fneg_fadd_0_f16:
 ; GFX11-SAFE:       ; %bb.0: ; %.entry
-; GFX11-SAFE-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-SAFE-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX11-SAFE-NEXT:    v_rcp_f16_e32 v0, s1
 ; GFX11-SAFE-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-SAFE-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-SAFE-NEXT:    v_div_fixup_f16 v0, v0, s1, 1.0
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_mul_f16_e32 v0, 0, v0
+; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_add_f16_e32 v0, 0, v0
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-SAFE-NEXT:    v_xor_b32_e32 v1, 0x8000, v0
 ; GFX11-SAFE-NEXT:    v_cmp_ngt_f16_e32 vcc_lo, s0, v0
+; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_cndmask_b32_e64 v0, v1, s0, vcc_lo
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_cmp_nlt_f16_e32 vcc_lo, 0, v0
 ; GFX11-SAFE-NEXT:    v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo
 ; GFX11-SAFE-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-NSZ-LABEL: fneg_fadd_0_f16:
 ; GFX11-NSZ:       ; %bb.0: ; %.entry
-; GFX11-NSZ-NEXT:    v_cvt_f32_f16_e32 v0, s1
-; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NSZ-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX11-NSZ-NEXT:    v_rcp_f16_e32 v0, s1
 ; GFX11-NSZ-NEXT:    s_waitcnt_depctr 0xfff
-; GFX11-NSZ-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX11-NSZ-NEXT:    v_div_fixup_f16 v0, v0, s1, 1.0
-; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NSZ-NEXT:    v_mul_f16_e32 v0, 0x8000, v0
-; GFX11-NSZ-NEXT:    v_cmp_nlt_f16_e64 s1, -v0, s0
 ; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NSZ-NEXT:    v_cmp_nlt_f16_e64 s1, -v0, s0
 ; GFX11-NSZ-NEXT:    v_cndmask_b32_e64 v0, v0, s0, s1
+; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NSZ-NEXT:    v_cmp_nlt_f16_e32 vcc_lo, 0, v0
 ; GFX11-NSZ-NEXT:    v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo
 ; GFX11-NSZ-NEXT:    ; return to shader part epilog


        


More information about the llvm-commits mailing list