[llvm] [SelectionDAG] Handle `fneg`/`fabs`/`fcopysign` in `SimplifyDemandedBits` (PR #139239)

Iris Shi via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 16 03:36:40 PDT 2025


https://github.com/el-ev updated https://github.com/llvm/llvm-project/pull/139239

>From b4e08dc599a58bc7783ff8995a35dd24cd92f941 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Fri, 9 May 2025 18:01:25 +0800
Subject: [PATCH 1/7] [SelectionDAG] Handle `fneg`/`fabs`/`fcopysign` in
 `SimplifyDemandedBits`

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  29 +---
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  71 +++++++++
 .../CodeGen/AArch64/extract-vector-elt.ll     |   5 +-
 llvm/test/CodeGen/AMDGPU/bf16-conversions.ll  |  38 ++---
 llvm/test/CodeGen/AMDGPU/bf16.ll              |  36 ++---
 llvm/test/CodeGen/AMDGPU/fnearbyint.ll        | 142 +++++++++---------
 llvm/test/CodeGen/AMDGPU/rcp-pattern.ll       |  14 +-
 llvm/test/CodeGen/AMDGPU/roundeven.ll         |  49 +++---
 .../AMDGPU/select-fabs-fneg-extract.v2f16.ll  |  20 +--
 9 files changed, 228 insertions(+), 176 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 5d62ded171f4f..22cadd06b14cd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18386,21 +18386,6 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
     }
   }
 
-  // copysign(fabs(x), y) -> copysign(x, y)
-  // copysign(fneg(x), y) -> copysign(x, y)
-  // copysign(copysign(x,z), y) -> copysign(x, y)
-  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
-      N0.getOpcode() == ISD::FCOPYSIGN)
-    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N0.getOperand(0), N1);
-
-  // copysign(x, abs(y)) -> abs(x)
-  if (N1.getOpcode() == ISD::FABS)
-    return DAG.getNode(ISD::FABS, DL, VT, N0);
-
-  // copysign(x, copysign(y,z)) -> copysign(x, z)
-  if (N1.getOpcode() == ISD::FCOPYSIGN)
-    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N0, N1.getOperand(1));
-
   // copysign(x, fp_extend(y)) -> copysign(x, y)
   // copysign(x, fp_round(y)) -> copysign(x, y)
   if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
@@ -18941,6 +18926,9 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
                        N0.getOperand(0));
   }
 
+  if (SimplifyDemandedBits(N0, APInt::getAllOnes(VT.getScalarSizeInBits())))
+    return SDValue(N, 0);
+
   if (SDValue Cast = foldSignChangeInBitcast(N))
     return Cast;
 
@@ -19014,14 +19002,9 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
   if (SDValue C = DAG.FoldConstantArithmetic(ISD::FABS, DL, VT, {N0}))
     return C;
 
-  // fold (fabs (fabs x)) -> (fabs x)
-  if (N0.getOpcode() == ISD::FABS)
-    return N->getOperand(0);
-
-  // fold (fabs (fneg x)) -> (fabs x)
-  // fold (fabs (fcopysign x, y)) -> (fabs x)
-  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
-    return DAG.getNode(ISD::FABS, DL, VT, N0.getOperand(0));
+  if (SimplifyDemandedBits(N0,
+                           APInt::getSignedMaxValue(VT.getScalarSizeInBits())))
+    return SDValue(N, 0);
 
   if (SDValue Cast = foldSignChangeInBitcast(N))
     return Cast;
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a0ffb4b6d5a4c..971ed5d97d523 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2966,6 +2966,77 @@ bool TargetLowering::SimplifyDemandedBits(
     }
     break;
   }
+  case ISD::FABS: {
+    SDValue Op0 = Op.getOperand(0);
+    APInt SignMask = APInt::getSignMask(BitWidth);
+
+    if (!DemandedBits.intersects(SignMask))
+      return TLO.CombineTo(Op, Op0);
+
+    if (SimplifyDemandedBits(Op0, ~SignMask & DemandedBits, DemandedElts, Known,
+                             TLO, Depth + 1))
+      return true;
+
+    if (Known.isNonNegative())
+      return TLO.CombineTo(Op, Op0);
+    if (Known.isNegative())
+      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::FNEG, dl, VT, Op0));
+
+    Known.Zero |= SignMask;
+    Known.One &= ~SignMask;
+
+    break;
+  }
+  case ISD::FCOPYSIGN: {
+    SDValue Op0 = Op.getOperand(0);
+    SDValue Op1 = Op.getOperand(1);
+    APInt SignMask = APInt::getSignMask(BitWidth);
+
+    if (!DemandedBits.intersects(SignMask))
+      return TLO.CombineTo(Op, Op0);
+
+    if (SimplifyDemandedBits(Op0, ~SignMask & DemandedBits, DemandedElts, Known,
+                             TLO, Depth + 1))
+      return true;
+    if (SimplifyDemandedBits(Op1, SignMask, DemandedElts, Known2, TLO,
+                             Depth + 1))
+      return true;
+
+    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
+      return true;
+
+    if ((Known.isNonNegative() && Known2.isNonNegative()) ||
+        (Known.isNegative() && Known2.isNegative()))
+      return TLO.CombineTo(Op, Op0);
+
+    if (Known2.isNonNegative())
+      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::FABS, dl, VT, Op0));
+
+    if (Known2.isNegative()) {
+      Known.One |= SignMask;
+      Known.Zero &= ~SignMask;
+    }
+
+    break;
+  }
+  case ISD::FNEG: {
+    SDValue Op0 = Op.getOperand(0);
+    APInt SignMask = APInt::getSignMask(BitWidth);
+
+    if (!DemandedBits.intersects(SignMask))
+      return TLO.CombineTo(Op, Op0);
+
+    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known, TLO,
+                             Depth + 1))
+      return true;
+
+    if (Known.isNonNegative() || Known.isNegative()) {
+      Known.Zero ^= SignMask;
+      Known.One ^= SignMask;
+    }
+
+    break;
+  }
   default:
     // We also ask the target about intrinsics (which could be specific to it).
     if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 5e5fdd6d31705..554b2e3444fe4 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -425,10 +425,7 @@ entry:
 define float @extract_v4i32_copysign_build_vector_const(<4 x float> %a, <4 x float> %b, i32 %c) {
 ; CHECK-SD-LABEL: extract_v4i32_copysign_build_vector_const:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    adrp x8, .LCPI17_0
-; CHECK-SD-NEXT:    mvni v1.4s, #128, lsl #24
-; CHECK-SD-NEXT:    ldr q2, [x8, :lo12:.LCPI17_0]
-; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT:    fabs v0.4s, v0.4s
 ; CHECK-SD-NEXT:    mov s0, v0.s[2]
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index a597faa028f22..f03958a967328 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -427,16 +427,18 @@ entry:
 define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
 ; GFX-942-LABEL: fptrunc_f64_to_bf16_abs:
 ; GFX-942:       ; %bb.0: ; %entry
-; GFX-942-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GFX-942-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
-; GFX-942-NEXT:    v_and_b32_e32 v7, 1, v6
-; GFX-942-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
-; GFX-942-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX-942-NEXT:    v_cvt_f32_f64_e64 v8, |v[0:1]|
+; GFX-942-NEXT:    v_and_b32_e32 v5, 0x7fffffff, v1
+; GFX-942-NEXT:    v_mov_b32_e32 v4, v0
+; GFX-942-NEXT:    v_cvt_f64_f32_e32 v[6:7], v8
+; GFX-942-NEXT:    v_and_b32_e32 v9, 1, v8
+; GFX-942-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[4:5]|, |v[6:7]|
+; GFX-942-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[6:7]
+; GFX-942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
 ; GFX-942-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-942-NEXT:    v_add_u32_e32 v4, v6, v4
+; GFX-942-NEXT:    v_add_u32_e32 v4, v8, v4
 ; GFX-942-NEXT:    s_or_b64 vcc, s[0:1], vcc
-; GFX-942-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX-942-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
 ; GFX-942-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX-942-NEXT:    s_movk_i32 s0, 0x7fff
 ; GFX-942-NEXT:    v_add3_u32 v5, v5, v4, s0
@@ -449,16 +451,18 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
 ;
 ; GFX-950-LABEL: fptrunc_f64_to_bf16_abs:
 ; GFX-950:       ; %bb.0: ; %entry
-; GFX-950-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
-; GFX-950-NEXT:    v_and_b32_e32 v7, 1, v6
-; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
-; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
-; GFX-950-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT:    v_add_u32_e32 v0, v6, v0
+; GFX-950-NEXT:    v_cvt_f32_f64_e64 v8, |v[0:1]|
+; GFX-950-NEXT:    v_and_b32_e32 v5, 0x7fffffff, v1
+; GFX-950-NEXT:    v_mov_b32_e32 v4, v0
+; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[6:7], v8
+; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[6:7]
+; GFX-950-NEXT:    v_and_b32_e32 v0, 1, v8
+; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[4:5]|, |v[6:7]|
+; GFX-950-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
+; GFX-950-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s[2:3]
+; GFX-950-NEXT:    v_add_u32_e32 v0, v8, v0
+; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc
 ; GFX-950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
 ; GFX-950-NEXT:    flat_store_short v[2:3], v0
 ; GFX-950-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 2bdf994496421..951f103f7d9c4 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -18639,8 +18639,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18648,8 +18648,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18832,8 +18832,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18843,8 +18843,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18889,23 +18889,23 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
 ; GCN-LABEL: s_fneg_fabs_bf16:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT:    s_bitset0_b32 s0, 31
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; GCN-NEXT:    s_lshr_b32 s0, s0, 16
 ; GCN-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: s_fneg_fabs_bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT:    s_bitset0_b32 s0, 31
-; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
index e9fd6119d0c36..9f9b14d1c87a0 100644
--- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -211,22 +211,22 @@ define amdgpu_kernel void @nearbyint_f64(ptr addrspace(1) %out, double %in) {
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    s_brev_b32 s8, -2
-; SI-NEXT:    v_mov_b32_e32 v1, 0x43300000
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    v_mov_b32_e32 v2, -1
-; SI-NEXT:    v_mov_b32_e32 v3, 0x432fffff
+; SI-NEXT:    v_mov_b32_e32 v0, -1
+; SI-NEXT:    v_mov_b32_e32 v1, 0x432fffff
+; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    v_mov_b32_e32 v3, 0x43300000
+; SI-NEXT:    s_mov_b32 s8, 0
+; SI-NEXT:    s_mov_b32 s9, 0xc3300000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_mov_b32_e32 v6, s3
-; SI-NEXT:    v_bfi_b32 v1, s8, v1, v6
-; SI-NEXT:    v_mov_b32_e32 v7, s2
-; SI-NEXT:    v_add_f64 v[4:5], s[2:3], v[0:1]
-; SI-NEXT:    v_add_f64 v[0:1], v[4:5], -v[0:1]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[2:3]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v7, vcc
+; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[2:3]
+; SI-NEXT:    v_mov_b32_e32 v4, s3
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    v_add_f64 v[2:3], v[2:3], s[8:9]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[0:1]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -270,30 +270,31 @@ define amdgpu_kernel void @nearbyint_v2f64(ptr addrspace(1) %out, <2 x double> %
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0xd
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_brev_b32 s10, -2
-; SI-NEXT:    v_mov_b32_e32 v6, 0x43300000
 ; SI-NEXT:    s_mov_b32 s9, 0x432fffff
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_mov_b32 s10, 0
+; SI-NEXT:    s_mov_b32 s11, 0x43300000
+; SI-NEXT:    s_mov_b32 s12, 0
+; SI-NEXT:    s_mov_b32 s13, 0xc3300000
 ; SI-NEXT:    s_mov_b32 s8, s2
-; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    v_mov_b32_e32 v5, s9
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s11
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v7, s7
-; SI-NEXT:    v_bfi_b32 v1, s10, v6, v7
-; SI-NEXT:    v_mov_b32_e32 v8, s6
-; SI-NEXT:    v_mov_b32_e32 v9, s5
-; SI-NEXT:    v_mov_b32_e32 v10, s4
-; SI-NEXT:    v_add_f64 v[2:3], s[6:7], v[0:1]
-; SI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[0:1]
-; SI-NEXT:    v_bfi_b32 v1, s10, v6, v9
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[0:1]
-; SI-NEXT:    v_add_f64 v[0:1], v[6:7], -v[0:1]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
+; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[2:3]
+; SI-NEXT:    v_mov_b32_e32 v8, s7
+; SI-NEXT:    v_mov_b32_e32 v9, s6
+; SI-NEXT:    v_add_f64 v[2:3], s[4:5], v[2:3]
+; SI-NEXT:    v_mov_b32_e32 v10, s5
+; SI-NEXT:    v_mov_b32_e32 v11, s4
+; SI-NEXT:    v_add_f64 v[4:5], v[4:5], s[12:13]
+; SI-NEXT:    v_add_f64 v[6:7], v[2:3], s[12:13]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[0:1]
+; SI-NEXT:    v_cndmask_b32_e32 v3, v5, v8, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v9, vcc
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[0:1]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v7, v10, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v6, v11, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -347,46 +348,45 @@ define amdgpu_kernel void @nearbyint_v4f64(ptr addrspace(1) %out, <4 x double> %
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x11
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
-; SI-NEXT:    s_brev_b32 s14, -2
-; SI-NEXT:    v_mov_b32_e32 v10, 0x43300000
 ; SI-NEXT:    s_mov_b32 s13, 0x432fffff
-; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    s_mov_b32 s14, 0
+; SI-NEXT:    s_mov_b32 s15, 0x43300000
+; SI-NEXT:    s_mov_b32 s16, 0
+; SI-NEXT:    s_mov_b32 s17, 0xc3300000
 ; SI-NEXT:    s_mov_b32 s12, s10
-; SI-NEXT:    v_mov_b32_e32 v8, s12
-; SI-NEXT:    v_mov_b32_e32 v9, s13
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_mov_b32_e32 v0, s14
+; SI-NEXT:    v_mov_b32_e32 v1, s15
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v2
-; SI-NEXT:    v_mov_b32_e32 v6, s2
-; SI-NEXT:    v_mov_b32_e32 v7, s1
-; SI-NEXT:    v_mov_b32_e32 v11, s0
-; SI-NEXT:    v_mov_b32_e32 v12, s7
-; SI-NEXT:    v_mov_b32_e32 v13, s6
-; SI-NEXT:    v_mov_b32_e32 v14, s5
-; SI-NEXT:    v_mov_b32_e32 v15, s4
-; SI-NEXT:    v_add_f64 v[0:1], s[2:3], v[4:5]
-; SI-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v7
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v3, v1, v2, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v6, vcc
-; SI-NEXT:    v_add_f64 v[0:1], s[0:1], v[4:5]
-; SI-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v12
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[0:1]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v11, vcc
-; SI-NEXT:    v_add_f64 v[6:7], s[6:7], v[4:5]
-; SI-NEXT:    v_add_f64 v[6:7], v[6:7], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v5, s14, v10, v14
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v12, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v13, vcc
-; SI-NEXT:    v_add_f64 v[10:11], s[4:5], v[4:5]
-; SI-NEXT:    v_add_f64 v[4:5], v[10:11], -v[4:5]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v15, vcc
+; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v10, s3
+; SI-NEXT:    v_mov_b32_e32 v11, s2
+; SI-NEXT:    v_add_f64 v[6:7], s[0:1], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v12, s1
+; SI-NEXT:    v_mov_b32_e32 v13, s0
+; SI-NEXT:    v_add_f64 v[8:9], s[6:7], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v14, s7
+; SI-NEXT:    v_mov_b32_e32 v15, s6
+; SI-NEXT:    v_add_f64 v[0:1], s[4:5], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v16, s5
+; SI-NEXT:    v_add_f64 v[2:3], v[2:3], s[16:17]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v11, vcc
+; SI-NEXT:    v_mov_b32_e32 v17, s4
+; SI-NEXT:    v_add_f64 v[6:7], v[6:7], s[16:17]
+; SI-NEXT:    v_add_f64 v[8:9], v[8:9], s[16:17]
+; SI-NEXT:    v_add_f64 v[10:11], v[0:1], s[16:17]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[0:1]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v7, v12, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v6, v13, vcc
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v7, v9, v14, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v6, v8, v15, vcc
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v11, v16, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v10, v17, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
 ; SI-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420ef0acb0..0b6baf4b5f504 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -559,10 +559,11 @@ define float @v_rcp_fabs_f32_ieee_ulp25(float %x) #3 {
 ; SI-LABEL: v_rcp_fabs_f32_ieee_ulp25:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v0
 ; SI-NEXT:    s_mov_b32 s4, 0x7f800000
-; SI-NEXT:    v_frexp_mant_f32_e64 v1, |v0|
-; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s4
-; SI-NEXT:    v_cndmask_b32_e64 v1, |v0|, v1, s[4:5]
+; SI-NEXT:    v_frexp_mant_f32_e64 v2, |v0|
+; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v1|, s4
+; SI-NEXT:    v_cndmask_b32_e64 v1, |v0|, v2, s[4:5]
 ; SI-NEXT:    v_rcp_f32_e32 v1, v1
 ; SI-NEXT:    v_frexp_exp_i32_f32_e32 v0, v0
 ; SI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
@@ -700,10 +701,11 @@ define float @v_rcp_neg_fabs_f32_ieee_ulp25(float %x) #3 {
 ; SI-LABEL: v_rcp_neg_fabs_f32_ieee_ulp25:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v0
 ; SI-NEXT:    s_mov_b32 s4, 0x7f800000
-; SI-NEXT:    v_frexp_mant_f32_e64 v1, -|v0|
-; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s4
-; SI-NEXT:    v_cndmask_b32_e64 v1, -|v0|, v1, s[4:5]
+; SI-NEXT:    v_frexp_mant_f32_e64 v2, -|v0|
+; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v1|, s4
+; SI-NEXT:    v_cndmask_b32_e64 v1, -|v0|, v2, s[4:5]
 ; SI-NEXT:    v_rcp_f32_e32 v1, v1
 ; SI-NEXT:    v_frexp_exp_i32_f32_e32 v0, v0
 ; SI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/roundeven.ll b/llvm/test/CodeGen/AMDGPU/roundeven.ll
index 59a1fe041bf90..5ebc08cf8402c 100644
--- a/llvm/test/CodeGen/AMDGPU/roundeven.ll
+++ b/llvm/test/CodeGen/AMDGPU/roundeven.ll
@@ -1124,14 +1124,14 @@ define double @v_roundeven_f64(double %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_f64:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    s_brev_b32 s6, -2
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0x43300000
-; SDAG_GFX6-NEXT:    v_bfi_b32 v3, s6, v2, v1
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
+; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[0:1], v[2:3]
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[0:1], s[6:7]
+; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[4:5], -v[2:3]
+; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0xc3300000
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[2:3], s[6:7]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -1208,18 +1208,18 @@ define double @v_roundeven_f64_fneg(double %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_f64_fneg:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    v_xor_b32_e32 v6, 0x80000000, v1
-; SDAG_GFX6-NEXT:    s_brev_b32 s4, -2
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0x43300000
-; SDAG_GFX6-NEXT:    v_bfi_b32 v3, s4, v2, v6
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], -v[0:1], v[2:3]
+; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
+; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], -v[0:1], s[6:7]
+; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[4:5], -v[2:3]
+; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0xc3300000
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[2:3], s[6:7]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
+; SDAG_GFX6-NEXT:    v_xor_b32_e32 v4, 0x80000000, v1
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; SDAG_GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SDAG_GFX7-LABEL: v_roundeven_f64_fneg:
@@ -1304,20 +1304,19 @@ define <2 x double> @v_roundeven_v2f64(<2 x double> %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_v2f64:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    s_brev_b32 s6, -2
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v8, 0x43300000
-; SDAG_GFX6-NEXT:    v_bfi_b32 v5, s6, v8, v1
-; SDAG_GFX6-NEXT:    v_mov_b32_e32 v4, 0
-; SDAG_GFX6-NEXT:    v_add_f64 v[6:7], v[0:1], v[4:5]
+; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
+; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[0:1], s[6:7]
+; SDAG_GFX6-NEXT:    s_mov_b32 s8, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    v_add_f64 v[5:6], v[6:7], -v[4:5]
+; SDAG_GFX6-NEXT:    s_mov_b32 s9, 0xc3300000
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[4:5], s[8:9]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; SDAG_GFX6-NEXT:    v_bfi_b32 v5, s6, v8, v3
-; SDAG_GFX6-NEXT:    v_add_f64 v[7:8], v[2:3], v[4:5]
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v6, v1, vcc
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[7:8], -v[4:5]
+; SDAG_GFX6-NEXT:    v_add_f64 v[6:7], v[2:3], s[6:7]
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[6:7], s[8:9]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[2:3]|, s[4:5]
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
index 7ed27f008083e..8691925797621 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
@@ -776,20 +776,16 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; CI-LABEL: add_select_fabs_negk_negk_v2f16:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-NEXT:    v_cndmask_b32_e64 v0, -1.0, -2.0, vcc
-; CI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-NEXT:    v_cndmask_b32_e64 v1, -1.0, -2.0, vcc
 ; CI-NEXT:    v_cvt_f16_f32_e32 v3, v3
 ; CI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; CI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT:    v_cndmask_b32_e64 v1, -1.0, -2.0, vcc
 ; CI-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; CI-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; CI-NEXT:    v_cvt_f32_f16_e64 v0, |v0|
-; CI-NEXT:    v_cvt_f32_f16_e64 v1, |v1|
-; CI-NEXT:    v_add_f32_e32 v0, v0, v2
-; CI-NEXT:    v_add_f32_e32 v1, v1, v3
+; CI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT:    v_cndmask_b32_e64 v0, -1.0, -2.0, vcc
+; CI-NEXT:    v_sub_f32_e32 v1, v3, v1
+; CI-NEXT:    v_sub_f32_e32 v0, v2, v0
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: add_select_fabs_negk_negk_v2f16:
@@ -801,8 +797,8 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
-; VI-NEXT:    v_add_f16_sdwa v1, |v1|, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT:    v_add_f16_e64 v0, |v0|, v2
+; VI-NEXT:    v_sub_f16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_sub_f16_e32 v0, v2, v0
 ; VI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; VI-NEXT:    s_setpc_b64 s[30:31]
 ;
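
Patch 1 above replaces several ad-hoc DAGCombiner folds with sign-bit reasoning in `SimplifyDemandedBits`: `fabs` is a no-op when the caller never demands the sign bit, `fabs` of a value whose sign bit is already known clear folds to the value itself (or to `fneg` when it is known set), and `fneg` simply flips whatever is known about the sign bit. Below is a minimal standalone sketch of that reasoning, using a simplified stand-in for KnownBits and plain integers for the bit patterns; it illustrates the idea and is not the LLVM code itself.

  #include <cstdint>
  #include <cstdio>

  // Simplified stand-in for KnownBits: which bits are known zero/one.
  struct Known {
    uint64_t Zero = 0, One = 0;
    bool isNonNegative(uint64_t SignMask) const { return (Zero & SignMask) != 0; }
    bool isNegative(uint64_t SignMask) const { return (One & SignMask) != 0; }
  };

  // fabs(x): if the sign bit is not demanded, fabs is a no-op; otherwise the
  // result's sign bit becomes known zero (mirrors the new ISD::FABS case).
  Known simplifyFabs(uint64_t DemandedBits, unsigned BitWidth, Known Op) {
    uint64_t SignMask = 1ull << (BitWidth - 1);
    if (!(DemandedBits & SignMask))
      return Op;                      // caller ignores the sign: fabs(x) -> x
    Known Result = Op;
    Result.Zero |= SignMask;          // sign bit known clear after fabs
    Result.One &= ~SignMask;
    return Result;
  }

  // fneg(x): if x's sign bit is known either way, flipping it keeps it known,
  // so XOR the knowledge with the sign mask (mirrors the new ISD::FNEG case).
  Known simplifyFneg(unsigned BitWidth, Known Op) {
    uint64_t SignMask = 1ull << (BitWidth - 1);
    Known Result = Op;
    if (Op.isNonNegative(SignMask) || Op.isNegative(SignMask)) {
      Result.Zero ^= SignMask;
      Result.One ^= SignMask;
    }
    return Result;
  }

  int main() {
    Known X;                          // nothing known about x
    Known A = simplifyFabs(~0ull, 32, X);
    Known N = simplifyFneg(32, A);    // fneg(fabs(x)): sign now known one
    std::printf("fabs nonneg=%d, fneg(fabs) neg=%d\n",
                A.isNonNegative(1ull << 31), N.isNegative(1ull << 31));
  }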

>From 7b544d41a7b484910800c1bd0fe5b5cfd13bc557 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Sat, 10 May 2025 23:56:28 +0800
Subject: [PATCH 2/7] update

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  4 ++
 .../CodeGen/SelectionDAG/TargetLowering.cpp   | 14 +++----
 llvm/test/CodeGen/AMDGPU/bf16-conversions.ll  | 38 +++++++++----------
 llvm/test/CodeGen/AMDGPU/rcp-pattern.ll       | 14 +++----
 4 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 22cadd06b14cd..4048d5a20142c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -19002,6 +19002,10 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
   if (SDValue C = DAG.FoldConstantArithmetic(ISD::FABS, DL, VT, {N0}))
     return C;
 
+  // fold (fabs (fabs x)) -> (fabs x)
+  if (N0.getOpcode() == ISD::FABS)
+    return N->getOperand(0);
+
   if (SimplifyDemandedBits(N0,
                            APInt::getSignedMaxValue(VT.getScalarSizeInBits())))
     return SDValue(N, 0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 971ed5d97d523..6dde51dc16513 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2980,7 +2980,8 @@ bool TargetLowering::SimplifyDemandedBits(
     if (Known.isNonNegative())
       return TLO.CombineTo(Op, Op0);
     if (Known.isNegative())
-      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::FNEG, dl, VT, Op0));
+      return TLO.CombineTo(
+          Op, TLO.DAG.getNode(ISD::FNEG, dl, VT, Op0, Op->getFlags()));
 
     Known.Zero |= SignMask;
     Known.One &= ~SignMask;
@@ -2996,21 +2997,18 @@ bool TargetLowering::SimplifyDemandedBits(
       return TLO.CombineTo(Op, Op0);
 
     if (SimplifyDemandedBits(Op0, ~SignMask & DemandedBits, DemandedElts, Known,
-                             TLO, Depth + 1))
-      return true;
-    if (SimplifyDemandedBits(Op1, SignMask, DemandedElts, Known2, TLO,
+                             TLO, Depth + 1) ||
+        SimplifyDemandedBits(Op1, SignMask, DemandedElts, Known2, TLO,
                              Depth + 1))
       return true;
 
-    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
-      return true;
-
     if ((Known.isNonNegative() && Known2.isNonNegative()) ||
         (Known.isNegative() && Known2.isNegative()))
       return TLO.CombineTo(Op, Op0);
 
     if (Known2.isNonNegative())
-      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::FABS, dl, VT, Op0));
+      return TLO.CombineTo(
+          Op, TLO.DAG.getNode(ISD::FABS, dl, VT, Op0, Op->getFlags()));
 
     if (Known2.isNegative()) {
       Known.One |= SignMask;
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index f03958a967328..a597faa028f22 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -427,18 +427,16 @@ entry:
 define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
 ; GFX-942-LABEL: fptrunc_f64_to_bf16_abs:
 ; GFX-942:       ; %bb.0: ; %entry
-; GFX-942-NEXT:    v_cvt_f32_f64_e64 v8, |v[0:1]|
-; GFX-942-NEXT:    v_and_b32_e32 v5, 0x7fffffff, v1
-; GFX-942-NEXT:    v_mov_b32_e32 v4, v0
-; GFX-942-NEXT:    v_cvt_f64_f32_e32 v[6:7], v8
-; GFX-942-NEXT:    v_and_b32_e32 v9, 1, v8
-; GFX-942-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[4:5]|, |v[6:7]|
-; GFX-942-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[6:7]
-; GFX-942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
+; GFX-942-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-942-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-942-NEXT:    v_and_b32_e32 v7, 1, v6
+; GFX-942-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-942-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GFX-942-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
 ; GFX-942-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-942-NEXT:    v_add_u32_e32 v4, v8, v4
+; GFX-942-NEXT:    v_add_u32_e32 v4, v6, v4
 ; GFX-942-NEXT:    s_or_b64 vcc, s[0:1], vcc
-; GFX-942-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX-942-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX-942-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX-942-NEXT:    s_movk_i32 s0, 0x7fff
 ; GFX-942-NEXT:    v_add3_u32 v5, v5, v4, s0
@@ -451,18 +449,16 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
 ;
 ; GFX-950-LABEL: fptrunc_f64_to_bf16_abs:
 ; GFX-950:       ; %bb.0: ; %entry
-; GFX-950-NEXT:    v_cvt_f32_f64_e64 v8, |v[0:1]|
-; GFX-950-NEXT:    v_and_b32_e32 v5, 0x7fffffff, v1
-; GFX-950-NEXT:    v_mov_b32_e32 v4, v0
-; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[6:7], v8
-; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[6:7]
-; GFX-950-NEXT:    v_and_b32_e32 v0, 1, v8
-; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[4:5]|, |v[6:7]|
-; GFX-950-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
+; GFX-950-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-950-NEXT:    v_and_b32_e32 v7, 1, v6
+; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
+; GFX-950-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
 ; GFX-950-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT:    v_add_u32_e32 v0, v8, v0
-; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc
+; GFX-950-NEXT:    v_add_u32_e32 v0, v6, v0
+; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
+; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
 ; GFX-950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
 ; GFX-950-NEXT:    flat_store_short v[2:3], v0
 ; GFX-950-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 0b6baf4b5f504..228420ef0acb0 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -559,11 +559,10 @@ define float @v_rcp_fabs_f32_ieee_ulp25(float %x) #3 {
 ; SI-LABEL: v_rcp_fabs_f32_ieee_ulp25:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v0
 ; SI-NEXT:    s_mov_b32 s4, 0x7f800000
-; SI-NEXT:    v_frexp_mant_f32_e64 v2, |v0|
-; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v1|, s4
-; SI-NEXT:    v_cndmask_b32_e64 v1, |v0|, v2, s[4:5]
+; SI-NEXT:    v_frexp_mant_f32_e64 v1, |v0|
+; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s4
+; SI-NEXT:    v_cndmask_b32_e64 v1, |v0|, v1, s[4:5]
 ; SI-NEXT:    v_rcp_f32_e32 v1, v1
 ; SI-NEXT:    v_frexp_exp_i32_f32_e32 v0, v0
 ; SI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
@@ -701,11 +700,10 @@ define float @v_rcp_neg_fabs_f32_ieee_ulp25(float %x) #3 {
 ; SI-LABEL: v_rcp_neg_fabs_f32_ieee_ulp25:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v0
 ; SI-NEXT:    s_mov_b32 s4, 0x7f800000
-; SI-NEXT:    v_frexp_mant_f32_e64 v2, -|v0|
-; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v1|, s4
-; SI-NEXT:    v_cndmask_b32_e64 v1, -|v0|, v2, s[4:5]
+; SI-NEXT:    v_frexp_mant_f32_e64 v1, -|v0|
+; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s4
+; SI-NEXT:    v_cndmask_b32_e64 v1, -|v0|, v1, s[4:5]
 ; SI-NEXT:    v_rcp_f32_e32 v1, v1
 ; SI-NEXT:    v_frexp_exp_i32_f32_e32 v0, v0
 ; SI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
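
Patch 2 re-adds the `fabs (fabs x)` fold in the combiner, merges the two `SimplifyDemandedBits` calls on the `fcopysign` operands, and propagates the node's flags when a fold rebuilds an `FNEG` or `FABS`. The folds it preserves are easy to state in libm terms: once the sign operand's sign is known, copysign collapses to `fabs` or a negated `fabs`. A small self-contained example of that equivalence, using plain doubles rather than SDValues:

  #include <cmath>
  #include <cstdio>

  // copysign(x, y) with y's sign known collapses (mirrors the
  // Known2.isNonNegative()/isNegative() folds in the patch):
  //   copysign(x, +y) == fabs(x)
  //   copysign(x, -y) == -fabs(x)
  double foldCopysign(double X, double SignSrc) {
    if (!std::signbit(SignSrc))
      return std::fabs(X);            // sign known clear -> fabs(x)
    return -std::fabs(X);             // sign known set   -> fneg(fabs(x))
  }

  int main() {
    std::printf("%g %g\n",
                foldCopysign(-3.0, 1.0),    // copysign(-3, +1) -> 3
                foldCopysign(3.0, -0.0));   // copysign(3, -0)  -> -3
  }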

>From f4c47fea884adac87a5de046fcf61af0ae901b89 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Wed, 11 Jun 2025 21:06:51 +0800
Subject: [PATCH 3/7] fix tests

---
 .../CodeGen/SelectionDAG/TargetLowering.cpp   | 20 ++++++++++++-------
 llvm/test/CodeGen/AMDGPU/fabs.bf16.ll         |  4 ++--
 llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll    | 19 +++++++++---------
 3 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 6dde51dc16513..36923aa24e4ea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2991,14 +2991,20 @@ bool TargetLowering::SimplifyDemandedBits(
   case ISD::FCOPYSIGN: {
     SDValue Op0 = Op.getOperand(0);
     SDValue Op1 = Op.getOperand(1);
-    APInt SignMask = APInt::getSignMask(BitWidth);
 
-    if (!DemandedBits.intersects(SignMask))
+    unsigned BitWidth0 = Op0.getScalarValueSizeInBits();
+    unsigned BitWidth1 = Op1.getScalarValueSizeInBits();
+    APInt SignMask0 = APInt::getSignMask(BitWidth0);
+    APInt SignMask1 = APInt::getSignMask(BitWidth1);
+
+    if (!DemandedBits.intersects(SignMask0))
       return TLO.CombineTo(Op, Op0);
 
-    if (SimplifyDemandedBits(Op0, ~SignMask & DemandedBits, DemandedElts, Known,
-                             TLO, Depth + 1) ||
-        SimplifyDemandedBits(Op1, SignMask, DemandedElts, Known2, TLO,
+    APInt ScalarDemandedBits = DemandedBits.trunc(BitWidth0);
+
+    if (SimplifyDemandedBits(Op0, ~SignMask0 & ScalarDemandedBits, DemandedElts,
+                             Known, TLO, Depth + 1) ||
+        SimplifyDemandedBits(Op1, SignMask1, DemandedElts, Known2, TLO,
                              Depth + 1))
       return true;
 
@@ -3011,8 +3017,8 @@ bool TargetLowering::SimplifyDemandedBits(
           Op, TLO.DAG.getNode(ISD::FABS, dl, VT, Op0, Op->getFlags()));
 
     if (Known2.isNegative()) {
-      Known.One |= SignMask;
-      Known.Zero &= ~SignMask;
+      Known.One |= SignMask0;
+      Known.Zero &= ~SignMask0;
     }
 
     break;
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index 71b1a16c79e69..1be99af42b8f8 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -220,7 +220,7 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_and_b32 s4, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s3, s3, 16
-; CI-NEXT:    s_and_b32 s5, s2, 0xffff0000
+; CI-NEXT:    s_and_b32 s5, s2, 0x7fff0000
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |s4|
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |s3|
 ; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s5|
@@ -944,7 +944,7 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
 ; CI-NEXT:    flat_load_dword v0, v[0:1]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index d189b6d4c1e83..8e2a187a71384 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -481,17 +481,16 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_add_f32_e64 v0, s3, 2.0
 ; CI-NEXT:    v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT:    v_readfirstlane_b32 s2, v0
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT:    s_bitset0_b32 s2, 31
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v1
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT:    s_xor_b32 s2, s2, 0x80000000
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
+; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT:    s_lshr_b32 s2, s2, 16
 ; CI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT:    v_alignbit_b32 v2, s2, v0, 16
+; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; CI-NEXT:    v_alignbit_b32 v2, v0, v1, 16
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
 ; CI-NEXT:    v_mov_b32_e32 v1, s1
 ; CI-NEXT:    flat_store_dword v[0:1], v2
@@ -676,7 +675,7 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
 ; CI-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshl_b32 s4, s2, 16
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    s_and_b32 s2, s2, 0x7fff0000
 ; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s2|
 ; CI-NEXT:    s_and_b32 s2, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s5, s3, 16
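
Patch 3 fixes the `FCOPYSIGN` case for mismatched operand widths: unlike the IR intrinsic, `ISD::FCOPYSIGN` does not require the magnitude and sign operands to have the same floating-point type, so the sign mask has to be computed per operand (`SignMask0` for the magnitude, `SignMask1` for the sign source). A purely illustrative bit-level model of that mixed-width transfer, assuming a 64-bit magnitude and a 32-bit sign source:

  #include <cstdint>
  #include <cstdio>

  // Mixed-width copysign at the bit level: keep the magnitude's non-sign
  // bits, read the sign bit from the (possibly narrower) sign operand, and
  // set the magnitude-width sign bit accordingly.
  uint64_t copysignBits(uint64_t Mag, unsigned MagBits,
                        uint64_t Sgn, unsigned SgnBits) {
    uint64_t SignMask0 = 1ull << (MagBits - 1);   // mask in the result type
    uint64_t SignMask1 = 1ull << (SgnBits - 1);   // mask in the sign type
    uint64_t Result = Mag & ~SignMask0;
    if (Sgn & SignMask1)
      Result |= SignMask0;
    return Result;
  }

  int main() {
    // f64 magnitude +2.0, f32 sign -1.0f: result is the bit pattern of -2.0.
    uint64_t R = copysignBits(0x4000000000000000ull, 64, 0xBF800000ull, 32);
    std::printf("0x%016llx\n", (unsigned long long)R); // 0xc000000000000000
  }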

>From 176fdd1fdb908d83c8c0557710b3efb0d26224e2 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Wed, 11 Jun 2025 21:52:59 +0800
Subject: [PATCH 4/7] fix final known bits of fcopysign

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  29 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  14 +-
 .../CodeGen/AArch64/extract-vector-elt.ll     |   7 +-
 llvm/test/CodeGen/AMDGPU/bf16.ll              |  36 +--
 llvm/test/CodeGen/AMDGPU/fabs.bf16.ll         |   4 +-
 llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll    |  54 ++--
 llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll     | 288 +++++++++---------
 llvm/test/CodeGen/AMDGPU/fnearbyint.ll        | 142 ++++-----
 llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll    |  19 +-
 llvm/test/CodeGen/AMDGPU/roundeven.ll         |  49 +--
 10 files changed, 301 insertions(+), 341 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4048d5a20142c..6e788e0051256 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18372,34 +18372,12 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
   if (SDValue C = DAG.FoldConstantArithmetic(ISD::FCOPYSIGN, DL, VT, {N0, N1}))
     return C;
 
-  if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N->getOperand(1))) {
-    const APFloat &V = N1C->getValueAPF();
-    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
-    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
-    if (!V.isNegative()) {
-      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
-        return DAG.getNode(ISD::FABS, DL, VT, N0);
-    } else {
-      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
-        return DAG.getNode(ISD::FNEG, DL, VT,
-                           DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
-    }
-  }
-
   // copysign(x, fp_extend(y)) -> copysign(x, y)
   // copysign(x, fp_round(y)) -> copysign(x, y)
   if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N0, N1.getOperand(0));
 
-  // We only take the sign bit from the sign operand.
-  EVT SignVT = N1.getValueType();
-  if (SimplifyDemandedBits(N1,
-                           APInt::getSignMask(SignVT.getScalarSizeInBits())))
-    return SDValue(N, 0);
-
-  // We only take the non-sign bits from the value operand
-  if (SimplifyDemandedBits(N0,
-                           APInt::getSignedMaxValue(VT.getScalarSizeInBits())))
+  if (SimplifyDemandedBits(SDValue(N, 0)))
     return SDValue(N, 0);
 
   return SDValue();
@@ -18926,7 +18904,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
                        N0.getOperand(0));
   }
 
-  if (SimplifyDemandedBits(N0, APInt::getAllOnes(VT.getScalarSizeInBits())))
+  if (SimplifyDemandedBits(SDValue(N, 0)))
     return SDValue(N, 0);
 
   if (SDValue Cast = foldSignChangeInBitcast(N))
@@ -19006,8 +18984,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
   if (N0.getOpcode() == ISD::FABS)
     return N->getOperand(0);
 
-  if (SimplifyDemandedBits(N0,
-                           APInt::getSignedMaxValue(VT.getScalarSizeInBits())))
+  if (SimplifyDemandedBits(N0))
     return SDValue(N, 0);
 
   if (SDValue Cast = foldSignChangeInBitcast(N))
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 36923aa24e4ea..e06a50bd59222 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3000,9 +3000,7 @@ bool TargetLowering::SimplifyDemandedBits(
     if (!DemandedBits.intersects(SignMask0))
       return TLO.CombineTo(Op, Op0);
 
-    APInt ScalarDemandedBits = DemandedBits.trunc(BitWidth0);
-
-    if (SimplifyDemandedBits(Op0, ~SignMask0 & ScalarDemandedBits, DemandedElts,
+    if (SimplifyDemandedBits(Op0, ~SignMask0 & DemandedBits, DemandedElts,
                              Known, TLO, Depth + 1) ||
         SimplifyDemandedBits(Op1, SignMask1, DemandedElts, Known2, TLO,
                              Depth + 1))
@@ -3016,11 +3014,13 @@ bool TargetLowering::SimplifyDemandedBits(
       return TLO.CombineTo(
           Op, TLO.DAG.getNode(ISD::FABS, dl, VT, Op0, Op->getFlags()));
 
-    if (Known2.isNegative()) {
-      Known.One |= SignMask0;
-      Known.Zero &= ~SignMask0;
-    }
+    if (Known2.isNegative())
+      return TLO.CombineTo(
+          Op, TLO.DAG.getNode(ISD::FNEG, dl, VT,
+                              TLO.DAG.getNode(ISD::FABS, SDLoc(Op0), VT, Op0)));
 
+    Known.Zero &= ~SignMask0;
+    Known.One &= ~SignMask0;
     break;
   }
   case ISD::FNEG: {
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 554b2e3444fe4..0189f52bbac06 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -391,13 +391,10 @@ define float @extract_v4i32_copysign_build_vector(<4 x float> %a, <4 x float> %b
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    sub sp, sp, #16
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-SD-NEXT:    adrp x8, .LCPI16_0
-; CHECK-SD-NEXT:    mvni v1.4s, #128, lsl #24
-; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-SD-NEXT:    ldr q2, [x8, :lo12:.LCPI16_0]
+; CHECK-SD-NEXT:    fabs v0.4s, v0.4s
 ; CHECK-SD-NEXT:    mov x8, sp
+; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-SD-NEXT:    bfi x8, x0, #2, #2
-; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
 ; CHECK-SD-NEXT:    str q0, [sp]
 ; CHECK-SD-NEXT:    ldr s0, [x8]
 ; CHECK-SD-NEXT:    add sp, sp, #16
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 951f103f7d9c4..2bdf994496421 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -18639,8 +18639,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18648,8 +18648,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18832,8 +18832,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18843,8 +18843,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18889,23 +18889,23 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
 ; GCN-LABEL: s_fneg_fabs_bf16:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, s0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GCN-NEXT:    s_bitset0_b32 s0, 31
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GCN-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GCN-NEXT:    s_lshr_b32 s0, s0, 16
 ; GCN-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: s_fneg_fabs_bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, s0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GFX7-NEXT:    s_bitset0_b32 s0, 31
+; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GFX7-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index 1be99af42b8f8..71b1a16c79e69 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -220,7 +220,7 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_and_b32 s4, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s3, s3, 16
-; CI-NEXT:    s_and_b32 s5, s2, 0x7fff0000
+; CI-NEXT:    s_and_b32 s5, s2, 0xffff0000
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |s4|
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |s3|
 ; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s5|
@@ -944,7 +944,7 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
 ; CI-NEXT:    flat_load_dword v0, v[0:1]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
index 7c89a41d62fbf..f901626e54a68 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
@@ -4388,12 +4388,11 @@ define <2 x bfloat> @v_copysign_out_v2bf16_mag_v2bf16_sign_v2f32(<2 x bfloat> %m
 ; GFX8-LABEL: v_copysign_out_v2bf16_mag_v2bf16_sign_v2f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_bfe_u32 v4, v1, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v1
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v1
+; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v2
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
@@ -5267,13 +5266,12 @@ define amdgpu_ps i32 @s_copysign_out_v2bf16_mag_v2bf16_sign_v2f32(<2 x bfloat> i
 ;
 ; GFX8-LABEL: s_copysign_out_v2bf16_mag_v2bf16_sign_v2f32:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_bfe_u32 s4, s1, 0x10010
-; GFX8-NEXT:    s_add_i32 s4, s4, s1
-; GFX8-NEXT:    s_or_b32 s3, s1, 0x400000
-; GFX8-NEXT:    s_add_i32 s6, s4, 0x7fff
+; GFX8-NEXT:    s_bfe_u32 s3, s1, 0x10010
+; GFX8-NEXT:    s_add_i32 s3, s3, s1
+; GFX8-NEXT:    s_addk_i32 s3, 0x7fff
 ; GFX8-NEXT:    v_cmp_u_f32_e64 s[4:5], s1, s1
 ; GFX8-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GFX8-NEXT:    s_cselect_b32 s1, s3, s6
+; GFX8-NEXT:    s_cselect_b32 s1, s1, s3
 ; GFX8-NEXT:    s_bfe_u32 s3, s2, 0x10010
 ; GFX8-NEXT:    s_add_i32 s3, s3, s2
 ; GFX8-NEXT:    s_addk_i32 s3, 0x7fff
@@ -6340,18 +6338,16 @@ define <3 x bfloat> @v_copysign_out_v3bf16_mag_v3bf16_sign_v3f32(<3 x bfloat> %m
 ; GFX8-LABEL: v_copysign_out_v3bf16_mag_v3bf16_sign_v3f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_bfe_u32 v6, v2, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v5, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX8-NEXT:    v_bfe_u32 v5, v2, 16, 1
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v2
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
+; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
@@ -7687,24 +7683,22 @@ define <4 x bfloat> @v_copysign_out_v4bf16_mag_v4bf16_sign_v4f32(<4 x bfloat> %m
 ; GFX8-LABEL: v_copysign_out_v4bf16_mag_v4bf16_sign_v4f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_bfe_u32 v7, v4, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v4
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v4
+; GFX8-NEXT:    v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v4, v7, v6, vcc
-; GFX8-NEXT:    v_bfe_u32 v7, v2, 16, 1
-; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v2
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v2
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, v7, v6, vcc
+; GFX8-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v5
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX8-NEXT:    v_bfe_u32 v6, v2, 16, 1
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
+; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
 ; GFX8-NEXT:    v_bfe_u32 v6, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v3
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 0x7fff, v6
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index e74d5ba24079d..a3ec35da29f67 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -3227,40 +3227,38 @@ define <2 x half> @v_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v3, 0xffe, v3
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v5, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v3, v0
-; VI-NEXT:    v_sub_u32_e32 v8, vcc, s4, v5
+; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v1
 ; VI-NEXT:    v_or_b32_e32 v3, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v8, v8, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v9, v8, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v8, v8, v9
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v8, v3
+; VI-NEXT:    v_med3_i32 v5, v5, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v8, v5, v3
+; VI-NEXT:    v_lshlrev_b32_e32 v5, v5, v8
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v3
 ; VI-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v5, vcc, s5, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v8, 12, v5
-; VI-NEXT:    v_or_b32_e32 v3, v9, v3
-; VI-NEXT:    v_or_b32_e32 v8, v0, v8
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
-; VI-NEXT:    v_cndmask_b32_e32 v3, v8, v3, vcc
-; VI-NEXT:    v_and_b32_e32 v8, 7, v3
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v8
-; VI-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v8
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 12, v1
+; VI-NEXT:    v_or_b32_e32 v3, v8, v3
+; VI-NEXT:    v_or_b32_e32 v5, v0, v5
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
+; VI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; VI-NEXT:    v_and_b32_e32 v5, 7, v3
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v8, v8, v9
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v5
+; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v5, v5, v8
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
-; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v8
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
+; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v5
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v6, v3, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v7, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, 0x8000
-; VI-NEXT:    v_and_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; VI-NEXT:    v_or_b32_e32 v0, v1, v0
-; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v2
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v4
 ; VI-NEXT:    s_setpc_b64 s[30:31]
@@ -4050,41 +4048,38 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; VI-NEXT:    s_bfe_u32 s1, s1, 0xb0014
 ; VI-NEXT:    v_readfirstlane_b32 s0, v0
-; VI-NEXT:    s_bfe_u32 s3, s1, 0xb0014
+; VI-NEXT:    s_sub_i32 s3, 0x3f1, s1
 ; VI-NEXT:    s_or_b32 s0, s7, s0
-; VI-NEXT:    s_sub_i32 s7, 0x3f1, s3
-; VI-NEXT:    v_med3_i32 v0, s7, 0, 13
+; VI-NEXT:    v_med3_i32 v0, s3, 0, 13
 ; VI-NEXT:    s_or_b32 s2, s0, 0x1000
-; VI-NEXT:    v_readfirstlane_b32 s7, v0
-; VI-NEXT:    s_lshr_b32 s8, s2, s7
-; VI-NEXT:    s_lshl_b32 s7, s8, s7
-; VI-NEXT:    s_cmp_lg_u32 s7, s2
+; VI-NEXT:    v_readfirstlane_b32 s3, v0
+; VI-NEXT:    s_lshr_b32 s7, s2, s3
+; VI-NEXT:    s_lshl_b32 s3, s7, s3
+; VI-NEXT:    s_cmp_lg_u32 s3, s2
 ; VI-NEXT:    s_cselect_b32 s2, 1, 0
-; VI-NEXT:    s_addk_i32 s3, 0xfc10
-; VI-NEXT:    s_lshl_b32 s7, s3, 12
-; VI-NEXT:    s_or_b32 s2, s8, s2
-; VI-NEXT:    s_or_b32 s7, s0, s7
-; VI-NEXT:    s_cmp_lt_i32 s3, 1
-; VI-NEXT:    s_cselect_b32 s2, s2, s7
-; VI-NEXT:    s_and_b32 s7, s2, 7
-; VI-NEXT:    s_cmp_gt_i32 s7, 5
-; VI-NEXT:    s_cselect_b32 s8, 1, 0
-; VI-NEXT:    s_cmp_eq_u32 s7, 3
+; VI-NEXT:    s_addk_i32 s1, 0xfc10
+; VI-NEXT:    s_lshl_b32 s3, s1, 12
+; VI-NEXT:    s_or_b32 s2, s7, s2
+; VI-NEXT:    s_or_b32 s3, s0, s3
+; VI-NEXT:    s_cmp_lt_i32 s1, 1
+; VI-NEXT:    s_cselect_b32 s2, s2, s3
+; VI-NEXT:    s_and_b32 s3, s2, 7
+; VI-NEXT:    s_cmp_gt_i32 s3, 5
 ; VI-NEXT:    s_cselect_b32 s7, 1, 0
-; VI-NEXT:    s_or_b32 s7, s7, s8
+; VI-NEXT:    s_cmp_eq_u32 s3, 3
+; VI-NEXT:    s_cselect_b32 s3, 1, 0
+; VI-NEXT:    s_or_b32 s3, s3, s7
 ; VI-NEXT:    s_lshr_b32 s2, s2, 2
-; VI-NEXT:    s_add_i32 s2, s2, s7
-; VI-NEXT:    s_cmp_lt_i32 s3, 31
+; VI-NEXT:    s_add_i32 s2, s2, s3
+; VI-NEXT:    s_cmp_lt_i32 s1, 31
 ; VI-NEXT:    s_cselect_b32 s2, s2, 0x7c00
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_cselect_b32 s0, s6, 0x7c00
-; VI-NEXT:    s_cmpk_eq_i32 s3, 0x40f
+; VI-NEXT:    s_cmpk_eq_i32 s1, 0x40f
 ; VI-NEXT:    s_cselect_b32 s0, s0, s2
-; VI-NEXT:    s_lshr_b32 s1, s1, 16
-; VI-NEXT:    s_and_b32 s1, s1, 0x8000
-; VI-NEXT:    s_or_b32 s0, s1, s0
-; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_and_b32 s0, s0, 0x7fff
 ; VI-NEXT:    s_or_b32 s0, s0, s5
 ; VI-NEXT:    s_mov_b32 s1, 0x7fff7fff
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
@@ -4918,40 +4913,37 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v5, 0xffe, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v8, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v5, v0
-; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v8
+; VI-NEXT:    v_sub_u32_e32 v8, vcc, s4, v1
 ; VI-NEXT:    v_or_b32_e32 v5, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v12, v11, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v12
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v5
+; VI-NEXT:    v_med3_i32 v8, v8, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v11, v8, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v8, v8, v11
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v8, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v8, vcc, s5, v8
-; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v8
-; VI-NEXT:    v_or_b32_e32 v5, v12, v5
-; VI-NEXT:    v_or_b32_e32 v11, v0, v11
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v8
-; VI-NEXT:    v_cndmask_b32_e32 v5, v11, v5, vcc
-; VI-NEXT:    v_and_b32_e32 v11, 7, v5
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
-; VI-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
+; VI-NEXT:    v_lshlrev_b32_e32 v8, 12, v1
+; VI-NEXT:    v_or_b32_e32 v5, v11, v5
+; VI-NEXT:    v_or_b32_e32 v8, v0, v8
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
+; VI-NEXT:    v_cndmask_b32_e32 v5, v8, v5, vcc
+; VI-NEXT:    v_and_b32_e32 v8, 7, v5
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v8
 ; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v11, v11, v12
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v8
+; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v8, v8, v11
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 2, v5
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v11
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v8
+; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v8
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v9, v10, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v8
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, 0x8000
-; VI-NEXT:    v_and_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_and_b32_e32 v5, 0x1ff, v3
 ; VI-NEXT:    v_or_b32_e32 v2, v5, v2
-; VI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    v_and_b32_e32 v1, 0xffe, v1
@@ -4986,7 +4978,8 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v3
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v6
 ; VI-NEXT:    v_bfi_b32 v1, s4, v4, v7
@@ -6061,76 +6054,73 @@ define <4 x half> @v_copysign_out_v4f16_mag_v4f64_sign_v4f16(<4 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    v_and_b32_e32 v10, 0xffe, v10
 ; VI-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v11, v5, 20, 11
+; VI-NEXT:    v_bfe_u32 v5, v5, 20, 11
 ; VI-NEXT:    s_movk_i32 s4, 0x3f1
 ; VI-NEXT:    v_or_b32_e32 v4, v10, v4
-; VI-NEXT:    v_sub_u32_e32 v12, vcc, s4, v11
+; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v5
 ; VI-NEXT:    v_or_b32_e32 v10, 0x1000, v4
-; VI-NEXT:    v_med3_i32 v12, v12, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v13, v12, v10
-; VI-NEXT:    v_lshlrev_b32_e32 v12, v12, v13
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v12, v10
+; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v12, v11, v10
+; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v12
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v10
 ; VI-NEXT:    s_movk_i32 s5, 0xfc10
 ; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v11, vcc, s5, v11
-; VI-NEXT:    v_lshlrev_b32_e32 v12, 12, v11
-; VI-NEXT:    v_or_b32_e32 v10, v13, v10
-; VI-NEXT:    v_or_b32_e32 v12, v4, v12
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v11
-; VI-NEXT:    v_cndmask_b32_e32 v10, v12, v10, vcc
-; VI-NEXT:    v_and_b32_e32 v12, 7, v10
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v12
-; VI-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v12
+; VI-NEXT:    v_add_u32_e32 v5, vcc, s5, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v5
+; VI-NEXT:    v_or_b32_e32 v10, v12, v10
+; VI-NEXT:    v_or_b32_e32 v11, v4, v11
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
+; VI-NEXT:    v_cndmask_b32_e32 v10, v11, v10, vcc
+; VI-NEXT:    v_and_b32_e32 v11, 7, v10
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
 ; VI-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v12, v12, v13
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
+; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v11, v11, v12
 ; VI-NEXT:    v_lshrrev_b32_e32 v10, 2, v10
-; VI-NEXT:    v_add_u32_e32 v10, vcc, v10, v12
-; VI-NEXT:    v_mov_b32_e32 v12, 0x7c00
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v11
-; VI-NEXT:    v_cndmask_b32_e32 v10, v12, v10, vcc
-; VI-NEXT:    v_mov_b32_e32 v13, 0x7e00
+; VI-NEXT:    v_add_u32_e32 v10, vcc, v10, v11
+; VI-NEXT:    v_mov_b32_e32 v11, 0x7c00
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
+; VI-NEXT:    v_cndmask_b32_e32 v10, v11, v10, vcc
+; VI-NEXT:    v_mov_b32_e32 v12, 0x7e00
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    s_movk_i32 s6, 0x40f
-; VI-NEXT:    v_cndmask_b32_e32 v4, v12, v13, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v11
+; VI-NEXT:    v_cndmask_b32_e32 v4, v11, v12, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
 ; VI-NEXT:    v_cndmask_b32_e32 v4, v10, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v10, 0x8000
-; VI-NEXT:    v_and_b32_e32 v11, 0x1ff, v7
-; VI-NEXT:    v_and_b32_sdwa v5, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT:    v_or_b32_e32 v6, v11, v6
-; VI-NEXT:    v_or_b32_e32 v4, v5, v4
+; VI-NEXT:    v_and_b32_e32 v10, 0x1ff, v7
+; VI-NEXT:    v_or_b32_e32 v6, v10, v6
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 8, v7
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
 ; VI-NEXT:    v_and_b32_e32 v5, 0xffe, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
 ; VI-NEXT:    v_bfe_u32 v7, v7, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v5, v5, v6
-; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v7
+; VI-NEXT:    v_sub_u32_e32 v10, vcc, s4, v7
 ; VI-NEXT:    v_or_b32_e32 v6, 0x1000, v5
-; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v14, v11, v6
-; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v14
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v6
+; VI-NEXT:    v_med3_i32 v10, v10, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v13, v10, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v10, v10, v13
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v10, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
 ; VI-NEXT:    v_add_u32_e32 v7, vcc, s5, v7
-; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v7
-; VI-NEXT:    v_or_b32_e32 v6, v14, v6
-; VI-NEXT:    v_or_b32_e32 v11, v5, v11
+; VI-NEXT:    v_lshlrev_b32_e32 v10, 12, v7
+; VI-NEXT:    v_or_b32_e32 v6, v13, v6
+; VI-NEXT:    v_or_b32_e32 v10, v5, v10
 ; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
-; VI-NEXT:    v_and_b32_e32 v11, 7, v6
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
-; VI-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
-; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v11, v11, v14
+; VI-NEXT:    v_cndmask_b32_e32 v6, v10, v6, vcc
+; VI-NEXT:    v_and_b32_e32 v10, 7, v6
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v10
+; VI-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v10
+; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v10, v10, v13
 ; VI-NEXT:    v_lshrrev_b32_e32 v6, 2, v6
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v11
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v10
 ; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v12, v6, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; VI-NEXT:    v_cndmask_b32_e32 v5, v12, v13, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v5, v11, v12, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
 ; VI-NEXT:    v_and_b32_e32 v7, 0x1ff, v1
 ; VI-NEXT:    v_or_b32_e32 v0, v7, v0
@@ -6139,39 +6129,37 @@ define <4 x half> @v_copysign_out_v4f16_mag_v4f64_sign_v4f16(<4 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v6, 0xffe, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v7, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v6, v0
-; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v7
+; VI-NEXT:    v_sub_u32_e32 v7, vcc, s4, v1
 ; VI-NEXT:    v_or_b32_e32 v6, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v14, v11, v6
-; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v14
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v6
+; VI-NEXT:    v_med3_i32 v7, v7, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v10, v7, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v7, v7, v10
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v7, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v7, vcc, s5, v7
-; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v7
-; VI-NEXT:    v_or_b32_e32 v6, v14, v6
-; VI-NEXT:    v_or_b32_e32 v11, v0, v11
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
-; VI-NEXT:    v_and_b32_e32 v11, 7, v6
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
-; VI-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
-; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v11, v11, v14
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
+; VI-NEXT:    v_lshlrev_b32_e32 v7, 12, v1
+; VI-NEXT:    v_or_b32_e32 v6, v10, v6
+; VI-NEXT:    v_or_b32_e32 v7, v0, v7
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
+; VI-NEXT:    v_cndmask_b32_e32 v6, v7, v6, vcc
+; VI-NEXT:    v_and_b32_e32 v7, 7, v6
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v7
+; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v7
+; VI-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v7, v7, v10
 ; VI-NEXT:    v_lshrrev_b32_e32 v6, 2, v6
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v11
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v12, v6, vcc
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v7
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
+; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v12, v13, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
+; VI-NEXT:    v_cndmask_b32_e32 v0, v11, v12, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; VI-NEXT:    v_and_b32_e32 v6, 0x1ff, v3
-; VI-NEXT:    v_and_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_or_b32_e32 v2, v6, v2
-; VI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    v_and_b32_e32 v1, 0xffe, v1
@@ -6200,16 +6188,18 @@ define <4 x half> @v_copysign_out_v4f16_mag_v4f64_sign_v4f16(<4 x double> %mag,
 ; VI-NEXT:    v_lshrrev_b32_e32 v2, 2, v2
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v6
 ; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v3
-; VI-NEXT:    v_cndmask_b32_e32 v2, v12, v2, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v2, v11, v2, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-NEXT:    v_cndmask_b32_e32 v1, v12, v13, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v1, v11, v12, vcc
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v3
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
+; VI-NEXT:    v_and_b32_e32 v2, 0x7fff, v4
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
-; VI-NEXT:    v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v8
 ; VI-NEXT:    v_bfi_b32 v1, s4, v1, v9
 ; VI-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
index 9f9b14d1c87a0..e9fd6119d0c36 100644
--- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -211,22 +211,22 @@ define amdgpu_kernel void @nearbyint_f64(ptr addrspace(1) %out, double %in) {
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v0, -1
-; SI-NEXT:    v_mov_b32_e32 v1, 0x432fffff
-; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    v_mov_b32_e32 v3, 0x43300000
-; SI-NEXT:    s_mov_b32 s8, 0
-; SI-NEXT:    s_mov_b32 s9, 0xc3300000
+; SI-NEXT:    s_brev_b32 s8, -2
+; SI-NEXT:    v_mov_b32_e32 v1, 0x43300000
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v2, -1
+; SI-NEXT:    v_mov_b32_e32 v3, 0x432fffff
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[2:3]
-; SI-NEXT:    v_mov_b32_e32 v4, s3
-; SI-NEXT:    v_mov_b32_e32 v5, s2
-; SI-NEXT:    v_add_f64 v[2:3], v[2:3], s[8:9]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[0:1]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v6, s3
+; SI-NEXT:    v_bfi_b32 v1, s8, v1, v6
+; SI-NEXT:    v_mov_b32_e32 v7, s2
+; SI-NEXT:    v_add_f64 v[4:5], s[2:3], v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], v[4:5], -v[0:1]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[2:3]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v7, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -270,31 +270,30 @@ define amdgpu_kernel void @nearbyint_v2f64(ptr addrspace(1) %out, <2 x double> %
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0xd
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_brev_b32 s10, -2
+; SI-NEXT:    v_mov_b32_e32 v6, 0x43300000
 ; SI-NEXT:    s_mov_b32 s9, 0x432fffff
-; SI-NEXT:    s_mov_b32 s10, 0
-; SI-NEXT:    s_mov_b32 s11, 0x43300000
-; SI-NEXT:    s_mov_b32 s12, 0
-; SI-NEXT:    s_mov_b32 s13, 0xc3300000
+; SI-NEXT:    v_mov_b32_e32 v0, 0
 ; SI-NEXT:    s_mov_b32 s8, s2
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    v_mov_b32_e32 v2, s10
-; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_mov_b32_e32 v5, s9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[2:3]
-; SI-NEXT:    v_mov_b32_e32 v8, s7
-; SI-NEXT:    v_mov_b32_e32 v9, s6
-; SI-NEXT:    v_add_f64 v[2:3], s[4:5], v[2:3]
-; SI-NEXT:    v_mov_b32_e32 v10, s5
-; SI-NEXT:    v_mov_b32_e32 v11, s4
-; SI-NEXT:    v_add_f64 v[4:5], v[4:5], s[12:13]
-; SI-NEXT:    v_add_f64 v[6:7], v[2:3], s[12:13]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[0:1]
-; SI-NEXT:    v_cndmask_b32_e32 v3, v5, v8, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v9, vcc
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[0:1]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v7, v10, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v6, v11, vcc
+; SI-NEXT:    v_mov_b32_e32 v7, s7
+; SI-NEXT:    v_bfi_b32 v1, s10, v6, v7
+; SI-NEXT:    v_mov_b32_e32 v8, s6
+; SI-NEXT:    v_mov_b32_e32 v9, s5
+; SI-NEXT:    v_mov_b32_e32 v10, s4
+; SI-NEXT:    v_add_f64 v[2:3], s[6:7], v[0:1]
+; SI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[0:1]
+; SI-NEXT:    v_bfi_b32 v1, s10, v6, v9
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
+; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], v[6:7], -v[0:1]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[4:5]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -348,45 +347,46 @@ define amdgpu_kernel void @nearbyint_v4f64(ptr addrspace(1) %out, <4 x double> %
 ; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x11
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_brev_b32 s14, -2
+; SI-NEXT:    v_mov_b32_e32 v10, 0x43300000
 ; SI-NEXT:    s_mov_b32 s13, 0x432fffff
-; SI-NEXT:    s_mov_b32 s14, 0
-; SI-NEXT:    s_mov_b32 s15, 0x43300000
-; SI-NEXT:    s_mov_b32 s16, 0
-; SI-NEXT:    s_mov_b32 s17, 0xc3300000
+; SI-NEXT:    v_mov_b32_e32 v4, 0
 ; SI-NEXT:    s_mov_b32 s12, s10
-; SI-NEXT:    v_mov_b32_e32 v4, s12
-; SI-NEXT:    v_mov_b32_e32 v5, s13
-; SI-NEXT:    v_mov_b32_e32 v0, s14
-; SI-NEXT:    v_mov_b32_e32 v1, s15
+; SI-NEXT:    v_mov_b32_e32 v8, s12
+; SI-NEXT:    v_mov_b32_e32 v9, s13
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v10, s3
-; SI-NEXT:    v_mov_b32_e32 v11, s2
-; SI-NEXT:    v_add_f64 v[6:7], s[0:1], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v12, s1
-; SI-NEXT:    v_mov_b32_e32 v13, s0
-; SI-NEXT:    v_add_f64 v[8:9], s[6:7], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v14, s7
-; SI-NEXT:    v_mov_b32_e32 v15, s6
-; SI-NEXT:    v_add_f64 v[0:1], s[4:5], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v16, s5
-; SI-NEXT:    v_add_f64 v[2:3], v[2:3], s[16:17]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v11, vcc
-; SI-NEXT:    v_mov_b32_e32 v17, s4
-; SI-NEXT:    v_add_f64 v[6:7], v[6:7], s[16:17]
-; SI-NEXT:    v_add_f64 v[8:9], v[8:9], s[16:17]
-; SI-NEXT:    v_add_f64 v[10:11], v[0:1], s[16:17]
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[0:1]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v1, v7, v12, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v0, v6, v13, vcc
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v9, v14, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v8, v15, vcc
-; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[4:5]
-; SI-NEXT:    v_cndmask_b32_e32 v5, v11, v16, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v4, v10, v17, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v2
+; SI-NEXT:    v_mov_b32_e32 v6, s2
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_mov_b32_e32 v11, s0
+; SI-NEXT:    v_mov_b32_e32 v12, s7
+; SI-NEXT:    v_mov_b32_e32 v13, s6
+; SI-NEXT:    v_mov_b32_e32 v14, s5
+; SI-NEXT:    v_mov_b32_e32 v15, s4
+; SI-NEXT:    v_add_f64 v[0:1], s[2:3], v[4:5]
+; SI-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v7
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[2:3]|, v[8:9]
+; SI-NEXT:    v_cndmask_b32_e32 v3, v1, v2, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v6, vcc
+; SI-NEXT:    v_add_f64 v[0:1], s[0:1], v[4:5]
+; SI-NEXT:    v_add_f64 v[0:1], v[0:1], -v[4:5]
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v12
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[0:1]|, v[8:9]
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v11, vcc
+; SI-NEXT:    v_add_f64 v[6:7], s[6:7], v[4:5]
+; SI-NEXT:    v_add_f64 v[6:7], v[6:7], -v[4:5]
+; SI-NEXT:    v_bfi_b32 v5, s14, v10, v14
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[6:7]|, v[8:9]
+; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v12, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v13, vcc
+; SI-NEXT:    v_add_f64 v[10:11], s[4:5], v[4:5]
+; SI-NEXT:    v_add_f64 v[4:5], v[10:11], -v[4:5]
+; SI-NEXT:    v_cmp_gt_f64_e64 vcc, |s[4:5]|, v[8:9]
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v14, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v15, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
 ; SI-NEXT:    s_endpgm
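
The updated SI sequences above build the 2^52 rounding constant with the input's
sign copied onto it (the v_bfi_b32 of 0x43300000 against the high word, using the
0x7fffffff mask from s_brev_b32 ..., -2) and subtract the same constant back via a
source modifier, instead of materializing separate +2^52 and -2^52 constant pairs.
A plain-C++ sketch of the underlying trick, assuming the default
round-to-nearest-even mode (an illustration only, not the backend's lowering code):

#include <cmath>
#include <cstdio>

double roundEvenViaMagic(double X) {
  // Values at or above 2^52 are already integral (the generated code covers
  // this with the |x| compare and the v_cndmask selects).
  if (!(std::fabs(X) < 0x1.0p52))
    return X;
  // Copy X's sign onto 2^52, mirroring the v_bfi_b32 on 0x43300000.
  const double Magic = std::copysign(0x1.0p52, X);
  // Adding and then subtracting the sign-matched 2^52 rounds away the
  // fractional bits under round-to-nearest-even.
  return (X + Magic) - Magic;
}

int main() {
  std::printf("%g %g %g\n", roundEvenViaMagic(2.5), roundEvenViaMagic(3.5),
              roundEvenViaMagic(-2.5)); // 2 4 -2
}
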
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index 8e2a187a71384..d189b6d4c1e83 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -481,16 +481,17 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_add_f32_e64 v0, s3, 2.0
 ; CI-NEXT:    v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; CI-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
-; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
+; CI-NEXT:    v_readfirstlane_b32 s2, v0
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; CI-NEXT:    s_bitset0_b32 s2, 31
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v1
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    s_xor_b32 s2, s2, 0x80000000
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; CI-NEXT:    s_lshr_b32 s2, s2, 16
 ; CI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
-; CI-NEXT:    v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT:    v_alignbit_b32 v2, s2, v0, 16
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
 ; CI-NEXT:    v_mov_b32_e32 v1, s1
 ; CI-NEXT:    flat_store_dword v[0:1], v2
@@ -675,7 +676,7 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
 ; CI-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshl_b32 s4, s2, 16
-; CI-NEXT:    s_and_b32 s2, s2, 0x7fff0000
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
 ; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s2|
 ; CI-NEXT:    s_and_b32 s2, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s5, s3, 16
diff --git a/llvm/test/CodeGen/AMDGPU/roundeven.ll b/llvm/test/CodeGen/AMDGPU/roundeven.ll
index 5ebc08cf8402c..59a1fe041bf90 100644
--- a/llvm/test/CodeGen/AMDGPU/roundeven.ll
+++ b/llvm/test/CodeGen/AMDGPU/roundeven.ll
@@ -1124,14 +1124,14 @@ define double @v_roundeven_f64(double %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_f64:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
-; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
+; SDAG_GFX6-NEXT:    s_brev_b32 s6, -2
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0x43300000
+; SDAG_GFX6-NEXT:    v_bfi_b32 v3, s6, v2, v1
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[0:1], s[6:7]
-; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[0:1], v[2:3]
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0xc3300000
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[2:3], s[6:7]
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[4:5], -v[2:3]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -1208,18 +1208,18 @@ define double @v_roundeven_f64_fneg(double %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_f64_fneg:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
-; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
+; SDAG_GFX6-NEXT:    v_xor_b32_e32 v6, 0x80000000, v1
+; SDAG_GFX6-NEXT:    s_brev_b32 s4, -2
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0x43300000
+; SDAG_GFX6-NEXT:    v_bfi_b32 v3, s4, v2, v6
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], -v[0:1], v[2:3]
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], -v[0:1], s[6:7]
-; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0xc3300000
-; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[2:3], s[6:7]
+; SDAG_GFX6-NEXT:    v_add_f64 v[2:3], v[4:5], -v[2:3]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
-; SDAG_GFX6-NEXT:    v_xor_b32_e32 v4, 0x80000000, v1
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
 ; SDAG_GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SDAG_GFX7-LABEL: v_roundeven_f64_fneg:
@@ -1304,19 +1304,20 @@ define <2 x double> @v_roundeven_v2f64(<2 x double> %x) {
 ; SDAG_GFX6-LABEL: v_roundeven_v2f64:
 ; SDAG_GFX6:       ; %bb.0:
 ; SDAG_GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG_GFX6-NEXT:    s_mov_b32 s6, 0
-; SDAG_GFX6-NEXT:    s_mov_b32 s7, 0x43300000
+; SDAG_GFX6-NEXT:    s_brev_b32 s6, -2
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v8, 0x43300000
+; SDAG_GFX6-NEXT:    v_bfi_b32 v5, s6, v8, v1
+; SDAG_GFX6-NEXT:    v_mov_b32_e32 v4, 0
+; SDAG_GFX6-NEXT:    v_add_f64 v[6:7], v[0:1], v[4:5]
 ; SDAG_GFX6-NEXT:    s_mov_b32 s4, -1
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[0:1], s[6:7]
-; SDAG_GFX6-NEXT:    s_mov_b32 s8, 0
 ; SDAG_GFX6-NEXT:    s_mov_b32 s5, 0x432fffff
-; SDAG_GFX6-NEXT:    s_mov_b32 s9, 0xc3300000
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[4:5], s[8:9]
+; SDAG_GFX6-NEXT:    v_add_f64 v[5:6], v[6:7], -v[4:5]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[0:1]|, s[4:5]
-; SDAG_GFX6-NEXT:    v_add_f64 v[6:7], v[2:3], s[6:7]
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
-; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
-; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[6:7], s[8:9]
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; SDAG_GFX6-NEXT:    v_bfi_b32 v5, s6, v8, v3
+; SDAG_GFX6-NEXT:    v_add_f64 v[7:8], v[2:3], v[4:5]
+; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v1, v6, v1, vcc
+; SDAG_GFX6-NEXT:    v_add_f64 v[4:5], v[7:8], -v[4:5]
 ; SDAG_GFX6-NEXT:    v_cmp_gt_f64_e64 vcc, |v[2:3]|, s[4:5]
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; SDAG_GFX6-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc

>From 9949c6a7abad12baca18e898ed78385dab82b2ec Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Thu, 12 Jun 2025 08:41:45 +0800
Subject: [PATCH 5/7] update fabs

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  6 +---
 llvm/test/CodeGen/AMDGPU/bf16.ll              | 36 +++++++++----------
 llvm/test/CodeGen/AMDGPU/fabs.bf16.ll         |  8 ++---
 llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll    | 21 ++++++-----
 .../AMDGPU/select-fabs-fneg-extract.f16.ll    | 12 +++----
 .../AMDGPU/select-fabs-fneg-extract.ll        |  3 +-
 .../AMDGPU/select-fabs-fneg-extract.v2f16.ll  | 21 ++++-------
 llvm/test/CodeGen/AMDGPU/udiv.ll              |  6 ++--
 8 files changed, 50 insertions(+), 63 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6e788e0051256..af6f8b986c54b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18980,11 +18980,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
   if (SDValue C = DAG.FoldConstantArithmetic(ISD::FABS, DL, VT, {N0}))
     return C;
 
-  // fold (fabs (fabs x)) -> (fabs x)
-  if (N0.getOpcode() == ISD::FABS)
-    return N->getOperand(0);
-
-  if (SimplifyDemandedBits(N0))
+  if (SimplifyDemandedBits(SDValue(N, 0)))
     return SDValue(N, 0);
 
   if (SDValue Cast = foldSignChangeInBitcast(N))
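
With the hand-written (fabs (fabs x)) fold gone, the simplification is expected to
fall out of the demanded-bits walk on the node itself. As a standalone sketch of
the bit-level identities involved, with plain uint32_t standing in for APInt
(illustrative only, not LLVM API):

#include <cstdint>
#include <cstdio>

constexpr uint32_t SignMask = 0x80000000u; // MSB of an IEEE-754 binary32

uint32_t fabsBits(uint32_t X) { return X & ~SignMask; } // clear the sign bit
uint32_t fnegBits(uint32_t X) { return X ^ SignMask; }  // flip the sign bit

int main() {
  uint32_t X = 0xC0490FDBu; // bit pattern of -pi as binary32
  // fabs(fabs x) == fabs x: clearing an already-cleared bit is a no-op.
  std::printf("%08X %08X\n", fabsBits(X), fabsBits(fabsBits(X)));
  // fabs(fneg x) == fabs x: the flipped sign bit is masked off anyway.
  std::printf("%08X %08X\n", fabsBits(X), fabsBits(fnegBits(X)));
}
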
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 2bdf994496421..884b1e39b2d2a 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -18639,8 +18639,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18648,8 +18648,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18832,8 +18832,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18843,8 +18843,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18889,23 +18889,23 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
 ; GCN-LABEL: s_fneg_fabs_bf16:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT:    s_bitset0_b32 s0, 31
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; GCN-NEXT:    s_lshr_b32 s0, s0, 16
 ; GCN-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: s_fneg_fabs_bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT:    s_bitset0_b32 s0, 31
-; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index 71b1a16c79e69..085d2792164d4 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -220,10 +220,10 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_and_b32 s4, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s3, s3, 16
-; CI-NEXT:    s_and_b32 s5, s2, 0xffff0000
+; CI-NEXT:    s_and_b32 s5, s2, 0x7fff0000
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |s4|
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |s3|
-; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s5|
+; CI-NEXT:    v_mul_f32_e64 v2, 1.0, s5
 ; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_alignbit_b32 v1, v0, v1, 16
@@ -944,9 +944,9 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
 ; CI-NEXT:    flat_load_dword v0, v[0:1]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
-; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
+; CI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; CI-NEXT:    v_mul_f32_e32 v1, 4.0, v1
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index d189b6d4c1e83..bc8af685e91dc 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -481,17 +481,16 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_add_f32_e64 v0, s3, 2.0
 ; CI-NEXT:    v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT:    v_readfirstlane_b32 s2, v0
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT:    s_bitset0_b32 s2, 31
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v1
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT:    s_xor_b32 s2, s2, 0x80000000
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
+; CI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; CI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT:    s_lshr_b32 s2, s2, 16
 ; CI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT:    v_alignbit_b32 v2, s2, v0, 16
+; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; CI-NEXT:    v_alignbit_b32 v2, v0, v1, 16
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
 ; CI-NEXT:    v_mov_b32_e32 v1, s1
 ; CI-NEXT:    flat_store_dword v[0:1], v2
@@ -676,8 +675,8 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
 ; CI-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshl_b32 s4, s2, 16
-; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s2|
+; CI-NEXT:    s_and_b32 s2, s2, 0x7fff0000
+; CI-NEXT:    v_mul_f32_e64 v2, 1.0, s2
 ; CI-NEXT:    s_and_b32 s2, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s5, s3, 16
 ; CI-NEXT:    v_mul_f32_e64 v3, 1.0, |s2|
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.f16.ll
index 030c332850124..0684c3081983f 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.f16.ll
@@ -452,7 +452,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; CI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; CI-NEXT:    v_cndmask_b32_e64 v0, -1.0, -2.0, vcc
 ; CI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; CI-NEXT:    v_add_f32_e64 v0, |v0|, v1
+; CI-NEXT:    v_sub_f32_e32 v0, v1, v0
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: add_select_fabs_negk_negk_f16:
@@ -462,7 +462,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; VI-NEXT:    v_mov_b32_e32 v3, 0xc000
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
-; VI-NEXT:    v_add_f16_e64 v0, |v0|, v1
+; VI-NEXT:    v_sub_f16_e32 v0, v1, v0
 ; VI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-SAFE-TRUE16-LABEL: add_select_fabs_negk_negk_f16:
@@ -472,7 +472,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; GFX11-SAFE-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-SAFE-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-TRUE16-NEXT:    v_cndmask_b16 v0.l, 0xbc00, v2.l, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT:    v_add_f16_e64 v0.l, |v0.l|, v1.l
+; GFX11-SAFE-TRUE16-NEXT:    v_sub_f16_e32 v0.l, v1.l, v0.l
 ; GFX11-SAFE-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-SAFE-FAKE16-LABEL: add_select_fabs_negk_negk_f16:
@@ -482,7 +482,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; GFX11-SAFE-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-SAFE-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0xbc00, v2, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT:    v_add_f16_e64 v0, |v0|, v1
+; GFX11-SAFE-FAKE16-NEXT:    v_sub_f16_e32 v0, v1, v0
 ; GFX11-SAFE-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-NSZ-TRUE16-LABEL: add_select_fabs_negk_negk_f16:
@@ -492,7 +492,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; GFX11-NSZ-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-NSZ-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NSZ-TRUE16-NEXT:    v_cndmask_b16 v0.l, 0xbc00, v2.l, vcc_lo
-; GFX11-NSZ-TRUE16-NEXT:    v_add_f16_e64 v0.l, |v0.l|, v1.l
+; GFX11-NSZ-TRUE16-NEXT:    v_sub_f16_e32 v0.l, v1.l, v0.l
 ; GFX11-NSZ-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-NSZ-FAKE16-LABEL: add_select_fabs_negk_negk_f16:
@@ -502,7 +502,7 @@ define half @add_select_fabs_negk_negk_f16(i32 %c, half %x) {
 ; GFX11-NSZ-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-NSZ-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NSZ-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0xbc00, v2, vcc_lo
-; GFX11-NSZ-FAKE16-NEXT:    v_add_f16_e64 v0, |v0|, v1
+; GFX11-NSZ-FAKE16-NEXT:    v_sub_f16_e32 v0, v1, v0
 ; GFX11-NSZ-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %c, 0
   %select = select i1 %cmp, half -2.0, half -1.0
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
index a680ba5933418..ec0455ab6e93e 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -132,12 +132,11 @@ define amdgpu_kernel void @add_select_fabs_negk_f32(i32 %c) #0 {
   ret void
 }
 
-; FIXME: fabs should fold away
 ; GCN-LABEL: {{^}}add_select_fabs_negk_negk_f32:
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
-; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[X]]
+; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
 define amdgpu_kernel void @add_select_fabs_negk_negk_f32(i32 %c) #0 {
   %x = load volatile float, ptr addrspace(1) poison
   %cmp = icmp eq i32 %c, 0
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
index 8691925797621..4e07c724b8a8c 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
@@ -812,8 +812,7 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
-; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
-; GFX9-NEXT:    v_pk_add_f16 v0, v0, v2
+; GFX9-NEXT:    v_pk_add_f16 v0, v2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-SAFE-TRUE16-LABEL: add_select_fabs_negk_negk_v2f16:
@@ -827,9 +826,7 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; GFX11-SAFE-TRUE16-NEXT:    v_cndmask_b16 v0.h, 0xbc00, v3.l, s0
 ; GFX11-SAFE-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-TRUE16-NEXT:    v_pack_b32_f16 v0, v0.h, v0.l
-; GFX11-SAFE-TRUE16-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
-; GFX11-SAFE-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SAFE-TRUE16-NEXT:    v_pk_add_f16 v0, v0, v2
+; GFX11-SAFE-TRUE16-NEXT:    v_pk_add_f16 v0, v2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-SAFE-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-SAFE-FAKE16-LABEL: add_select_fabs_negk_negk_v2f16:
@@ -842,9 +839,8 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; GFX11-SAFE-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-SAFE-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0xbc00, v3, vcc_lo
 ; GFX11-SAFE-FAKE16-NEXT:    v_pack_b32_f16 v0, v0, v1
-; GFX11-SAFE-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SAFE-FAKE16-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
-; GFX11-SAFE-FAKE16-NEXT:    v_pk_add_f16 v0, v0, v2
+; GFX11-SAFE-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-FAKE16-NEXT:    v_pk_add_f16 v0, v2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-SAFE-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-NSZ-TRUE16-LABEL: add_select_fabs_negk_negk_v2f16:
@@ -858,9 +854,7 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; GFX11-NSZ-TRUE16-NEXT:    v_cndmask_b16 v0.h, 0xbc00, v3.l, s0
 ; GFX11-NSZ-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NSZ-TRUE16-NEXT:    v_pack_b32_f16 v0, v0.h, v0.l
-; GFX11-NSZ-TRUE16-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
-; GFX11-NSZ-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NSZ-TRUE16-NEXT:    v_pk_add_f16 v0, v0, v2
+; GFX11-NSZ-TRUE16-NEXT:    v_pk_add_f16 v0, v2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-NSZ-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-NSZ-FAKE16-LABEL: add_select_fabs_negk_negk_v2f16:
@@ -873,9 +867,8 @@ define <2 x half> @add_select_fabs_negk_negk_v2f16(<2 x i32> %c, <2 x half> %x)
 ; GFX11-NSZ-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; GFX11-NSZ-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0xbc00, v3, vcc_lo
 ; GFX11-NSZ-FAKE16-NEXT:    v_pack_b32_f16 v0, v0, v1
-; GFX11-NSZ-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NSZ-FAKE16-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
-; GFX11-NSZ-FAKE16-NEXT:    v_pk_add_f16 v0, v0, v2
+; GFX11-NSZ-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NSZ-FAKE16-NEXT:    v_pk_add_f16 v0, v2, v0 neg_lo:[0,1] neg_hi:[0,1]
 ; GFX11-NSZ-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq <2 x i32> %c, zeroinitializer
   %select = select <2 x i1> %cmp, <2 x half> <half -2.0, half -2.0>, <2 x half> <half -1.0, half -1.0>
diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index 660ff4677547a..04b98730c6a1f 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -1515,7 +1515,7 @@ define amdgpu_kernel void @v_udiv_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
 ; EG-NEXT:     MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
-; EG-NEXT:     SETGE * T1.W, |PV.W|, |T0.Y|,
+; EG-NEXT:     SETGE * T1.W, |PV.W|, T0.Y,
 ; EG-NEXT:     CNDE T1.W, PV.W, 0.0, literal.x,
 ; EG-NEXT:     FLT_TO_UINT * T0.X, T0.W,
 ; EG-NEXT:    1(1.401298e-45), 0(0.000000e+00)
@@ -1658,7 +1658,7 @@ define amdgpu_kernel void @v_udiv_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
 ; EG-NEXT:     MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
-; EG-NEXT:     SETGE * T1.W, |PV.W|, |T0.Y|,
+; EG-NEXT:     SETGE * T1.W, |PV.W|, T0.Y,
 ; EG-NEXT:     CNDE T1.W, PV.W, 0.0, literal.x,
 ; EG-NEXT:     FLT_TO_UINT * T0.X, T0.W,
 ; EG-NEXT:    1(1.401298e-45), 0(0.000000e+00)
@@ -1858,7 +1858,7 @@ define amdgpu_kernel void @v_udiv_i23(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
 ; EG-NEXT:     MULADD_IEEE T1.W, -PV.W, T0.X, T0.Z,
 ; EG-NEXT:     TRUNC * T0.W, PV.W,
-; EG-NEXT:     SETGE * T1.W, |PV.W|, |T0.X|,
+; EG-NEXT:     SETGE * T1.W, |PV.W|, T0.X,
 ; EG-NEXT:     CNDE T1.W, PV.W, 0.0, literal.x,
 ; EG-NEXT:     FLT_TO_UINT * T0.X, T0.W,
 ; EG-NEXT:    1(1.401298e-45), 0(0.000000e+00)

>From ef08d31a84feeab51feb648ffe91c5f09aea9784 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Mon, 16 Jun 2025 17:05:45 +0800
Subject: [PATCH 6/7] fix fabs DemandedBits

---
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  4 +--
 llvm/test/CodeGen/AMDGPU/bf16.ll              | 36 +++++++++----------
 llvm/test/CodeGen/AMDGPU/fabs.bf16.ll         |  8 ++---
 llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll    | 21 +++++------
 4 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e06a50bd59222..881bb308fd3e5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2973,8 +2973,8 @@ bool TargetLowering::SimplifyDemandedBits(
     if (!DemandedBits.intersects(SignMask))
       return TLO.CombineTo(Op, Op0);
 
-    if (SimplifyDemandedBits(Op0, ~SignMask & DemandedBits, DemandedElts, Known,
-                             TLO, Depth + 1))
+    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known, TLO,
+                             Depth + 1))
       return true;
 
     if (Known.isNonNegative())
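
This revision goes back to querying the FABS operand with the caller's full
DemandedBits rather than ~SignMask & DemandedBits; the result's sign bit is still
forced to zero through Known afterwards. A toy model of that bookkeeping, with a
plain struct standing in for llvm::KnownBits (names and shapes are illustrative,
not the real API):

#include <cstdint>
#include <cstdio>

constexpr uint32_t SignMask = 0x80000000u;

// A bit set in Zero/One means that bit is known 0/1 in every possible value.
struct KnownToy {
  uint32_t Zero = 0, One = 0;
  bool isNonNegative() const { return (Zero & SignMask) != 0; }
};

// fabs forces the result's sign bit to zero regardless of the operand.
KnownToy knownForFabs(KnownToy Op) {
  Op.Zero |= SignMask;
  Op.One &= ~SignMask;
  return Op;
}

int main() {
  KnownToy Op; // nothing known about the operand
  std::printf("fabs result known nonnegative: %d\n",
              knownForFabs(Op).isNonNegative()); // prints 1
}
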
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 884b1e39b2d2a..2bdf994496421 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -18639,8 +18639,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18648,8 +18648,8 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -18832,8 +18832,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18843,8 +18843,8 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
@@ -18889,23 +18889,23 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
 ; GCN-LABEL: s_fneg_fabs_bf16:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_mul_f32_e64 v0, 1.0, s0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GCN-NEXT:    s_bitset0_b32 s0, 31
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GCN-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GCN-NEXT:    s_lshr_b32 s0, s0, 16
 ; GCN-NEXT:    ; return to shader part epilog
 ;
 ; GFX7-LABEL: s_fneg_fabs_bf16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, s0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX7-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GFX7-NEXT:    s_bitset0_b32 s0, 31
+; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff0000
+; GFX7-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index 085d2792164d4..71b1a16c79e69 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -220,10 +220,10 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_and_b32 s4, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s3, s3, 16
-; CI-NEXT:    s_and_b32 s5, s2, 0x7fff0000
+; CI-NEXT:    s_and_b32 s5, s2, 0xffff0000
 ; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |s4|
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |s3|
-; CI-NEXT:    v_mul_f32_e64 v2, 1.0, s5
+; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s5|
 ; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_alignbit_b32 v1, v0, v1, 16
@@ -944,9 +944,9 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
 ; CI-NEXT:    flat_load_dword v0, v[0:1]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; CI-NEXT:    v_mul_f32_e64 v1, 1.0, |v1|
-; CI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; CI-NEXT:    v_mul_f32_e64 v0, 1.0, |v0|
 ; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; CI-NEXT:    v_mul_f32_e32 v1, 4.0, v1
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index bc8af685e91dc..d189b6d4c1e83 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -481,16 +481,17 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
 ; CI-NEXT:    s_lshl_b32 s2, s2, 16
 ; CI-NEXT:    v_add_f32_e64 v0, s3, 2.0
 ; CI-NEXT:    v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
-; CI-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; CI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; CI-NEXT:    v_mul_f32_e32 v1, 1.0, v1
+; CI-NEXT:    v_readfirstlane_b32 s2, v0
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; CI-NEXT:    s_bitset0_b32 s2, 31
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v1
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    s_xor_b32 s2, s2, 0x80000000
 ; CI-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; CI-NEXT:    s_lshr_b32 s2, s2, 16
 ; CI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
-; CI-NEXT:    v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT:    v_alignbit_b32 v2, s2, v0, 16
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
 ; CI-NEXT:    v_mov_b32_e32 v1, s1
 ; CI-NEXT:    flat_store_dword v[0:1], v2
@@ -675,8 +676,8 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
 ; CI-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshl_b32 s4, s2, 16
-; CI-NEXT:    s_and_b32 s2, s2, 0x7fff0000
-; CI-NEXT:    v_mul_f32_e64 v2, 1.0, s2
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff0000
+; CI-NEXT:    v_mul_f32_e64 v2, 1.0, |s2|
 ; CI-NEXT:    s_and_b32 s2, s3, 0xffff0000
 ; CI-NEXT:    s_lshl_b32 s5, s3, 16
 ; CI-NEXT:    v_mul_f32_e64 v3, 1.0, |s2|
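
The net effect of the scalar sequences in these fneg-fabs checks is easy to state at the bit level: for bfloat16 (sign in bit 15), fneg(fabs(x)) simply forces the sign bit on, and the lowering above performs it on the bf16 payload parked in the high half of a 32-bit register. A small sketch, assuming the standard bf16 layout, illustration only:

#include <cassert>
#include <cstdint>

// Direct form on a 16-bit bf16 value: clear the sign (fabs), then flip it
// (fneg); together that just sets the sign bit.
static uint16_t fnegFabsBF16(uint16_t X) { return X | 0x8000u; }

// The same operation as the scalar code emits it, with the bf16 value held
// in bits 31..16 of a 32-bit register:
static uint32_t fnegFabsBF16HighHalf(uint32_t Reg) {
  Reg &= 0xFFFF0000u; // isolate the bf16 payload  (s_and_b32 0xffff0000)
  Reg &= 0x7FFFFFFFu; // fabs: clear bit 31        (s_bitset0_b32 ..., 31)
  Reg ^= 0x80000000u; // fneg: flip bit 31         (s_xor_b32 0x80000000)
  return Reg >> 16;   // move the result back down (s_lshr_b32 ..., 16)
}

int main() {
  assert(fnegFabsBF16(0x3F80) == 0xBF80); // 1.0 bf16 -> -1.0 bf16
  assert(fnegFabsBF16HighHalf(0x3F800000u) == 0xBF80u);
  return 0;
}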

>From 5463e21441faa41498ad04e7dd62772c6a804e5d Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Mon, 16 Jun 2025 18:33:32 +0800
Subject: [PATCH 7/7] fix fcopysign DemandedBits

---
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |    6 +-
 .../AMDGPU/copysign-simplify-demanded-bits.ll |    3 +-
 llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll    |   84 +-
 llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll     | 1507 +++++++++--------
 llvm/test/CodeGen/X86/combine-fcopysign.ll    |   28 +-
 5 files changed, 866 insertions(+), 762 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 881bb308fd3e5..6df3cd9184e82 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3000,8 +3000,8 @@ bool TargetLowering::SimplifyDemandedBits(
     if (!DemandedBits.intersects(SignMask0))
       return TLO.CombineTo(Op, Op0);
 
-    if (SimplifyDemandedBits(Op0, ~SignMask0 & DemandedBits, DemandedElts,
-                             Known, TLO, Depth + 1) ||
+    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known, TLO,
+                             Depth + 1) ||
         SimplifyDemandedBits(Op1, SignMask1, DemandedElts, Known2, TLO,
                              Depth + 1))
       return true;
@@ -3034,7 +3034,7 @@ bool TargetLowering::SimplifyDemandedBits(
                              Depth + 1))
       return true;
 
-    if (Known.isNonNegative() || Known.isNegative()) {
+    if (!Known.isSignUnknown()) {
       Known.Zero ^= SignMask;
       Known.One ^= SignMask;
     }
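
For context on the two hunks above: bitwise, fcopysign takes every result bit from the magnitude operand except the sign bit, which comes from the sign operand; and fneg flips exactly the sign bit, so a sign that is known either way stays known after XOR-ing the mask. A standalone sketch in plain C++, with f32 bit patterns standing in for LLVM's APInt/KnownBits machinery; illustration only, not the actual implementation:

#include <cassert>
#include <cstdint>

constexpr uint32_t SignMask = 0x80000000u; // sign bit of an f32

// fcopysign at the bit level. Only the sign bit of `Sign` matters, which
// is why the sign operand is queried with just SignMask1 in the hunk.
static uint32_t copysignBits(uint32_t Mag, uint32_t Sign) {
  return (Mag & ~SignMask) | (Sign & SignMask);
}

// Minimal stand-in for KnownBits: each bit is known-zero, known-one, or
// unknown (set in neither mask).
struct Known32 {
  uint32_t Zero = 0, One = 0;
  bool isSignUnknown() const { return !((Zero | One) & SignMask); }
};

// fneg flips exactly the sign bit; if the operand's sign is known in
// either direction, XOR-ing SignMask into both masks yields the negated
// known sign. `!Known.isSignUnknown()` is the one predicate covering both
// the known-nonnegative and the known-negative case.
static Known32 fnegKnown(Known32 K) {
  if (!K.isSignUnknown()) {
    K.Zero ^= SignMask;
    K.One ^= SignMask;
  }
  return K;
}

int main() {
  // copysign(-3.5f, 1.0f) == 3.5f at the bit level.
  assert(copysignBits(0xC0600000u, 0x3F800000u) == 0x40600000u);
  Known32 K;
  K.Zero = SignMask;        // operand known non-negative
  Known32 N = fnegKnown(K);
  assert(N.One == SignMask && N.Zero == 0); // result known negative
  return 0;
}
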
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
index 021104114d796..16928f8c2f4d9 100644
--- a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
@@ -198,8 +198,9 @@ define float @copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_fabs(floa
 ; GFX9-LABEL: copysign_f32_f32_sign_known_p0_or_n0__mag_known_positive_fabs:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 31, v1
 ; GFX9-NEXT:    s_brev_b32 s4, -2
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 31, v1
 ; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   %x = call float @llvm.fabs.f32(float %x.arg)
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
index f901626e54a68..2fc017444a747 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
@@ -3729,8 +3729,8 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; GCN-NEXT:    s_brev_b32 s4, -2
-; GCN-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v2
 ; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v3
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -3740,8 +3740,8 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
-; GFX7-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_brev_b32 s4, -2
 ; GFX7-NEXT:    v_bfi_b32 v0, s4, v0, v2
 ; GFX7-NEXT:    v_bfi_b32 v1, s4, v1, v3
@@ -3750,7 +3750,7 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GFX8-LABEL: v_copysign_out_v2f32_mag_v2bf16_sign_v2f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x7fff0000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX8-NEXT:    s_brev_b32 s4, -2
 ; GFX8-NEXT:    v_bfi_b32 v0, s4, v0, v1
@@ -3760,7 +3760,7 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GFX9-LABEL: v_copysign_out_v2f32_mag_v2bf16_sign_v2f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff0000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX9-NEXT:    s_brev_b32 s4, -2
 ; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v1
@@ -3771,7 +3771,7 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v4, 0x7fff0000, v0
+; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v0
 ; GFX10-NEXT:    v_bfi_b32 v0, 0x7fffffff, v3, v1
 ; GFX10-NEXT:    v_bfi_b32 v1, 0x7fffffff, v4, v2
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
@@ -3780,7 +3780,7 @@ define <2 x float> @v_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bfloat> %mag
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
-; GFX11-NEXT:    v_and_b32_e32 v4, 0x7fff0000, v0
+; GFX11-NEXT:    v_and_b32_e32 v4, 0xffff0000, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, v3, v1
 ; GFX11-NEXT:    v_bfi_b32 v1, 0x7fffffff, v4, v2
@@ -4574,8 +4574,8 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GCN-NEXT:    s_brev_b32 s0, -2
 ; GCN-NEXT:    v_mov_b32_e32 v2, s3
 ; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_bfi_b32 v0, s0, v0, v2
 ; GCN-NEXT:    v_bfi_b32 v1, s0, v1, v3
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v1
@@ -4586,10 +4586,10 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    v_mul_f32_e64 v0, 1.0, s1
 ; GFX7-NEXT:    v_mul_f32_e64 v1, 1.0, s0
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_brev_b32 s0, -2
 ; GFX7-NEXT:    v_mov_b32_e32 v2, s3
-; GFX7-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX7-NEXT:    v_bfi_b32 v0, s0, v0, v2
 ; GFX7-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX7-NEXT:    v_bfi_b32 v1, s0, v1, v2
@@ -4600,7 +4600,7 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GFX8-LABEL: s_copysign_out_v2f32_mag_v2bf16_sign_v2f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_lshl_b32 s3, s0, 16
-; GFX8-NEXT:    s_and_b32 s0, s0, 0x7fff0000
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff0000
 ; GFX8-NEXT:    s_brev_b32 s4, -2
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s2
@@ -4615,7 +4615,7 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GFX9-LABEL: s_copysign_out_v2f32_mag_v2bf16_sign_v2f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_lshl_b32 s3, s0, 16
-; GFX9-NEXT:    s_and_b32 s0, s0, 0x7fff0000
+; GFX9-NEXT:    s_and_b32 s0, s0, 0xffff0000
 ; GFX9-NEXT:    s_brev_b32 s4, -2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
@@ -4632,7 +4632,7 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX10-NEXT:    s_lshl_b32 s1, s0, 16
-; GFX10-NEXT:    s_and_b32 s0, s0, 0x7fff0000
+; GFX10-NEXT:    s_and_b32 s0, s0, 0xffff0000
 ; GFX10-NEXT:    v_bfi_b32 v0, 0x7fffffff, s1, v0
 ; GFX10-NEXT:    v_bfi_b32 v1, 0x7fffffff, s0, v1
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
@@ -4643,7 +4643,7 @@ define amdgpu_ps <2 x i32> @s_copysign_out_v2f32_mag_v2bf16_sign_v2f32(<2 x bflo
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v1, s2
 ; GFX11-NEXT:    s_lshl_b32 s1, s0, 16
-; GFX11-NEXT:    s_and_b32 s0, s0, 0x7fff0000
+; GFX11-NEXT:    s_and_b32 s0, s0, 0xffff0000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, s1, v0
 ; GFX11-NEXT:    v_bfi_b32 v1, 0x7fffffff, s0, v1
@@ -5470,9 +5470,9 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GCN-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; GCN-NEXT:    v_mul_f32_e32 v2, 1.0, v2
 ; GCN-NEXT:    s_brev_b32 s4, -2
-; GCN-NEXT:    v_and_b32_e32 v2, 0x7fff0000, v2
-; GCN-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v3
 ; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v4
 ; GCN-NEXT:    v_bfi_b32 v2, s4, v2, v5
@@ -5484,9 +5484,9 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GFX7-NEXT:    v_mul_f32_e32 v0, 1.0, v0
 ; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT:    v_and_b32_e32 v2, 0x7fff0000, v2
-; GFX7-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_brev_b32 s4, -2
 ; GFX7-NEXT:    v_bfi_b32 v0, s4, v0, v3
 ; GFX7-NEXT:    v_bfi_b32 v1, s4, v1, v4
@@ -5497,7 +5497,7 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
-; GFX8-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX8-NEXT:    s_brev_b32 s4, -2
 ; GFX8-NEXT:    v_bfi_b32 v0, s4, v0, v2
@@ -5509,7 +5509,7 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
-; GFX9-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX9-NEXT:    s_brev_b32 s4, -2
 ; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v2
@@ -5521,7 +5521,7 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v6, 0x7fff0000, v0
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v0
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
 ; GFX10-NEXT:    v_bfi_b32 v0, 0x7fffffff, v5, v2
 ; GFX10-NEXT:    v_bfi_b32 v1, 0x7fffffff, v6, v3
@@ -5532,7 +5532,7 @@ define <3 x float> @v_copysign_out_v3f32_mag_v3bf16_sign_v3f32(<3 x bfloat> %mag
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX11-NEXT:    v_and_b32_e32 v6, 0x7fff0000, v0
+; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, v5, v2
@@ -6607,10 +6607,10 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GCN-NEXT:    v_mul_f32_e32 v2, 1.0, v2
 ; GCN-NEXT:    v_mul_f32_e32 v3, 1.0, v3
 ; GCN-NEXT:    s_brev_b32 s4, -2
-; GCN-NEXT:    v_and_b32_e32 v3, 0x7fff0000, v3
-; GCN-NEXT:    v_and_b32_e32 v2, 0x7fff0000, v2
-; GCN-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GCN-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GCN-NEXT:    v_bfi_b32 v0, s4, v0, v4
 ; GCN-NEXT:    v_bfi_b32 v1, s4, v1, v5
 ; GCN-NEXT:    v_bfi_b32 v2, s4, v2, v6
@@ -6624,10 +6624,10 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GFX7-NEXT:    v_mul_f32_e32 v1, 1.0, v1
 ; GFX7-NEXT:    v_mul_f32_e32 v2, 1.0, v2
 ; GFX7-NEXT:    v_mul_f32_e32 v3, 1.0, v3
-; GFX7-NEXT:    v_and_b32_e32 v3, 0x7fff0000, v3
-; GFX7-NEXT:    v_and_b32_e32 v2, 0x7fff0000, v2
-; GFX7-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v1
-; GFX7-NEXT:    v_and_b32_e32 v0, 0x7fff0000, v0
+; GFX7-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX7-NEXT:    s_brev_b32 s4, -2
 ; GFX7-NEXT:    v_bfi_b32 v0, s4, v0, v4
 ; GFX7-NEXT:    v_bfi_b32 v1, s4, v1, v5
@@ -6638,9 +6638,9 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GFX8-LABEL: v_copysign_out_v4f32_mag_v4bf16_sign_v4f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x7fff0000, v1
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v1
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
-; GFX8-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX8-NEXT:    s_brev_b32 s4, -2
 ; GFX8-NEXT:    v_bfi_b32 v0, s4, v0, v2
@@ -6652,9 +6652,9 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GFX9-LABEL: v_copysign_out_v4f32_mag_v4bf16_sign_v4f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x7fff0000, v1
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v1
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
-; GFX9-NEXT:    v_and_b32_e32 v1, 0x7fff0000, v0
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX9-NEXT:    s_brev_b32 s4, -2
 ; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v2
@@ -6667,9 +6667,9 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v7, 0x7fff0000, v0
+; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v0
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
-; GFX10-NEXT:    v_and_b32_e32 v9, 0x7fff0000, v1
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xffff0000, v1
 ; GFX10-NEXT:    v_bfi_b32 v0, 0x7fffffff, v6, v2
 ; GFX10-NEXT:    v_bfi_b32 v1, 0x7fffffff, v7, v3
 ; GFX10-NEXT:    v_bfi_b32 v2, 0x7fffffff, v8, v4
@@ -6680,9 +6680,9 @@ define <4 x float> @v_copysign_out_v4f32_mag_v4bf16_sign_v4f32(<4 x bfloat> %mag
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 16, v0
-; GFX11-NEXT:    v_and_b32_e32 v7, 0x7fff0000, v0
+; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
-; GFX11-NEXT:    v_and_b32_e32 v9, 0x7fff0000, v1
+; GFX11-NEXT:    v_and_b32_e32 v9, 0xffff0000, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, v6, v2
 ; GFX11-NEXT:    v_bfi_b32 v1, 0x7fffffff, v7, v3
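
A note on the v_bfi_b32 idiom that dominates these checks: bitfield-insert computes (a & mask) | (b & ~mask), and with mask 0x7fffffff (what s_brev_b32 sN, -2 materializes, since bit-reversing 0xfffffffe gives 0x7fffffff) it performs a full copysign in one instruction. Sketch, illustration only:

#include <cassert>
#include <cstdint>

// v_bfi_b32 dst, mask, a, b: take bits of `a` where `mask` is set and
// bits of `b` where it is clear.
static uint32_t bfi(uint32_t Mask, uint32_t A, uint32_t B) {
  return (A & Mask) | (B & ~Mask);
}

int main() {
  // copysign(3.5f, -1.0f) at the bit level: magnitude bits from A, the
  // sign bit from B.
  assert(bfi(0x7FFFFFFFu, 0x40600000u, 0xBF800000u) == 0xC0600000u);
  return 0;
}
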
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index a3ec35da29f67..1386214437721 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -916,39 +916,42 @@ define half @v_copysign_out_f16_mag_f64_sign_f16(double %mag, half %sign) {
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; VI-NEXT:    v_and_b32_e32 v3, 0xffe, v3
-; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v4, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v3, v0
-; VI-NEXT:    v_sub_u32_e32 v4, vcc, 0x3f1, v1
+; VI-NEXT:    v_sub_u32_e32 v5, vcc, 0x3f1, v4
 ; VI-NEXT:    v_or_b32_e32 v3, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v4, v4, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v5, v4, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v4, v4, v5
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v4, v3
+; VI-NEXT:    v_med3_i32 v5, v5, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v6, v5, v3
+; VI-NEXT:    v_lshlrev_b32_e32 v5, v5, v6
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v3
 ; VI-NEXT:    s_movk_i32 s4, 0xfc10
 ; VI-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s4, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 12, v1
-; VI-NEXT:    v_or_b32_e32 v3, v5, v3
-; VI-NEXT:    v_or_b32_e32 v4, v0, v4
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; VI-NEXT:    v_and_b32_e32 v4, 7, v3
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v4
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
+; VI-NEXT:    v_lshlrev_b32_e32 v5, 12, v4
+; VI-NEXT:    v_or_b32_e32 v3, v6, v3
+; VI-NEXT:    v_or_b32_e32 v5, v0, v5
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v4
+; VI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; VI-NEXT:    v_and_b32_e32 v5, 7, v3
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v5
+; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v4
-; VI-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v4, v4, v5
+; VI-NEXT:    v_or_b32_e32 v5, v5, v6
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
-; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v4
-; VI-NEXT:    v_mov_b32_e32 v4, 0x7c00
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
-; VI-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, 0x7e00
+; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v5
+; VI-NEXT:    v_mov_b32_e32 v5, 0x7c00
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v4
+; VI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v6, 0x7e00
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    s_movk_i32 s4, 0x40f
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v5, v6, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v4
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, 0x8000
+; VI-NEXT:    v_and_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; VI-NEXT:    s_movk_i32 s4, 0x7fff
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v2
 ; VI-NEXT:    s_setpc_b64 s[30:31]
@@ -962,87 +965,144 @@ define half @v_copysign_out_f16_mag_f64_sign_f16(double %mag, half %sign) {
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
 ; GFX9-NEXT:    s_movk_i32 s4, 0xffe
-; GFX9-NEXT:    v_bfe_u32 v1, v1, 20, 11
+; GFX9-NEXT:    v_bfe_u32 v4, v1, 20, 11
 ; GFX9-NEXT:    v_and_or_b32 v0, v3, s4, v0
-; GFX9-NEXT:    v_sub_u32_e32 v4, 0x3f1, v1
+; GFX9-NEXT:    v_sub_u32_e32 v5, 0x3f1, v4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x1000, v0
-; GFX9-NEXT:    v_med3_i32 v4, v4, 0, 13
-; GFX9-NEXT:    v_lshrrev_b32_e32 v5, v4, v3
-; GFX9-NEXT:    v_lshlrev_b32_e32 v4, v4, v5
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v4, v3
+; GFX9-NEXT:    v_med3_i32 v5, v5, 0, 13
+; GFX9-NEXT:    v_lshrrev_b32_e32 v6, v5, v3
+; GFX9-NEXT:    v_lshlrev_b32_e32 v5, v5, v6
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v3
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; GFX9-NEXT:    v_add_u32_e32 v1, 0xfffffc10, v1
-; GFX9-NEXT:    v_or_b32_e32 v3, v5, v3
-; GFX9-NEXT:    v_lshl_or_b32 v4, v1, 12, v0
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; GFX9-NEXT:    v_and_b32_e32 v4, 7, v3
-; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v4
+; GFX9-NEXT:    v_add_u32_e32 v4, 0xfffffc10, v4
+; GFX9-NEXT:    v_or_b32_e32 v3, v6, v3
+; GFX9-NEXT:    v_lshl_or_b32 v5, v4, 12, v0
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT:    v_and_b32_e32 v5, 7, v3
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v5
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v5
 ; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v4
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
-; GFX9-NEXT:    v_or_b32_e32 v4, v4, v5
+; GFX9-NEXT:    v_or_b32_e32 v5, v5, v6
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
-; GFX9-NEXT:    v_add_u32_e32 v3, v3, v4
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0x7c00
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT:    v_add_u32_e32 v3, v3, v5
+; GFX9-NEXT:    v_mov_b32_e32 v5, 0x7c00
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v6, 0x7e00
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x40f
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v1
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v6, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT:    s_mov_b32 s4, 0x8000
+; GFX9-NEXT:    v_and_or_b32 v0, v1, s4, v0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v2
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: v_copysign_out_f16_mag_f64_sign_f16:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_and_or_b32 v0, 0x1ff, v1, v0
-; GFX11-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
-; GFX11-NEXT:    v_bfe_u32 v1, v1, 20, 11
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_sub_nc_u32_e32 v4, 0x3f1, v1
-; GFX11-NEXT:    v_add_nc_u32_e32 v1, 0xfffffc10, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_and_or_b32 v0, 0xffe, v3, v0
-; GFX11-NEXT:    v_med3_i32 v3, v4, 0, 13
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_or_b32_e32 v4, 0x1000, v0
-; GFX11-NEXT:    v_lshrrev_b32_e32 v5, v3, v4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_lshlrev_b32_e32 v3, v3, v5
-; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v3, v4
-; GFX11-NEXT:    v_lshl_or_b32 v4, v1, 12, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_or_b32_e32 v3, v5, v3
-; GFX11-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_and_b32_e32 v4, 7, v3
-; GFX11-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
-; GFX11-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v4
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v4
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_or_b32_e32 v4, v4, v5
-; GFX11-NEXT:    v_dual_mov_b32 v4, 0x7e00 :: v_dual_add_nc_u32 v3, v3, v4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v4, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc_lo
-; GFX11-NEXT:    v_bfi_b32 v0, 0x7fff, v0, v2
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-TRUE16-LABEL: v_copysign_out_f16_mag_f64_sign_f16:
+; GFX11-TRUE16:       ; %bb.0:
+; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0x1ff, v1, v0
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v4, v1, 20, 11
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v5, 0x3f1, v4
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0xffe, v3, v0
+; GFX11-TRUE16-NEXT:    v_med3_i32 v3, v5, 0, 13
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, 0x1000, v0
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v6, v3, v5
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v3, v3, v6
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v3, v5
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v3, v6, v3
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v4, 0xfffffc10, v4
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v5, v4, 12, v0
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v4
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc_lo
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v5, 7, v3
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v5
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v5
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v5, v5, v6
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_dual_mov_b32 v6, 0x7e00 :: v_dual_add_nc_u32 v3, v3, v5
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v6, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v4
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v5.l, v1.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v3, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v4
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc_lo
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0x8000, v5, v0
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_bfi_b32 v0, 0x7fff, v0, v2
+; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: v_copysign_out_f16_mag_f64_sign_f16:
+; GFX11-FAKE16:       ; %bb.0:
+; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0x1ff, v1, v0
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v4, v1, 20, 11
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v5, 0x3f1, v4
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0xffe, v3, v0
+; GFX11-FAKE16-NEXT:    v_med3_i32 v3, v5, 0, 13
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, 0x1000, v0
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v6, v3, v5
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, v3, v6
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v3, v5
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v3, v6, v3
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v4, 0xfffffc10, v4
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v5, v4, 12, v0
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v4
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc_lo
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v5, 7, v3
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v5
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v5
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v5, v6
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v6, 0x7e00 :: v_dual_add_nc_u32 v3, v3, v5
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v6, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v4
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v3, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v4
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0x8000, v1, v0
+; GFX11-FAKE16-NEXT:    v_bfi_b32 v0, 0x7fff, v0, v2
+; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %mag.trunc = fptrunc double %mag to half
   %result = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
   ret half %result
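
The new instructions these checks pick up (s_lshr_b32 ..., 16 / s_and_b32 ..., 0x8000 / s_or_b32, and the v_and_b32_sdwa equivalents) re-attach the f64 sign after the magnitude has been rounded to f16: bit 63 of the double sits in bit 31 of its high dword, and a 16-bit right shift lands it exactly in the f16 sign position. Sketch, illustration only:

#include <cassert>
#include <cstdint>

// Move the f64 sign (bit 31 of the high dword) into f16 position (bit 15)
// and OR it onto the sign-less truncated magnitude.
static uint16_t attachF64Sign(uint16_t MagF16, uint32_t HiDword) {
  uint32_t Sign = (HiDword >> 16) & 0x8000u;
  return static_cast<uint16_t>(MagF16 | Sign);
}

int main() {
  // High dword of -2.0 (f64 bits 0xC000000000000000) is 0xC0000000;
  // 2.0 as f16 is 0x4000 and -2.0 as f16 is 0xC000.
  assert(attachF64Sign(0x4000, 0xC0000000u) == 0xC000);
  return 0;
}
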
@@ -1109,39 +1169,42 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
 ; VI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; VI-NEXT:    s_lshr_b32 s0, s1, 8
-; VI-NEXT:    s_bfe_u32 s1, s1, 0xb0014
+; VI-NEXT:    s_bfe_u32 s4, s1, 0xb0014
 ; VI-NEXT:    s_and_b32 s0, s0, 0xffe
 ; VI-NEXT:    v_readfirstlane_b32 s3, v0
-; VI-NEXT:    s_sub_i32 s4, 0x3f1, s1
+; VI-NEXT:    s_sub_i32 s5, 0x3f1, s4
 ; VI-NEXT:    s_or_b32 s0, s0, s3
-; VI-NEXT:    v_med3_i32 v0, s4, 0, 13
+; VI-NEXT:    v_med3_i32 v0, s5, 0, 13
 ; VI-NEXT:    s_or_b32 s3, s0, 0x1000
-; VI-NEXT:    v_readfirstlane_b32 s4, v0
-; VI-NEXT:    s_lshr_b32 s5, s3, s4
-; VI-NEXT:    s_lshl_b32 s4, s5, s4
-; VI-NEXT:    s_cmp_lg_u32 s4, s3
+; VI-NEXT:    v_readfirstlane_b32 s5, v0
+; VI-NEXT:    s_lshr_b32 s6, s3, s5
+; VI-NEXT:    s_lshl_b32 s5, s6, s5
+; VI-NEXT:    s_cmp_lg_u32 s5, s3
 ; VI-NEXT:    s_cselect_b32 s3, 1, 0
-; VI-NEXT:    s_addk_i32 s1, 0xfc10
-; VI-NEXT:    s_lshl_b32 s4, s1, 12
-; VI-NEXT:    s_or_b32 s3, s5, s3
-; VI-NEXT:    s_or_b32 s4, s0, s4
-; VI-NEXT:    s_cmp_lt_i32 s1, 1
-; VI-NEXT:    s_cselect_b32 s3, s3, s4
-; VI-NEXT:    s_and_b32 s4, s3, 7
-; VI-NEXT:    s_cmp_gt_i32 s4, 5
+; VI-NEXT:    s_addk_i32 s4, 0xfc10
+; VI-NEXT:    s_lshl_b32 s5, s4, 12
+; VI-NEXT:    s_or_b32 s3, s6, s3
+; VI-NEXT:    s_or_b32 s5, s0, s5
+; VI-NEXT:    s_cmp_lt_i32 s4, 1
+; VI-NEXT:    s_cselect_b32 s3, s3, s5
+; VI-NEXT:    s_and_b32 s5, s3, 7
+; VI-NEXT:    s_cmp_gt_i32 s5, 5
+; VI-NEXT:    s_cselect_b32 s6, 1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 3
 ; VI-NEXT:    s_cselect_b32 s5, 1, 0
-; VI-NEXT:    s_cmp_eq_u32 s4, 3
-; VI-NEXT:    s_cselect_b32 s4, 1, 0
-; VI-NEXT:    s_or_b32 s4, s4, s5
+; VI-NEXT:    s_or_b32 s5, s5, s6
 ; VI-NEXT:    s_lshr_b32 s3, s3, 2
-; VI-NEXT:    s_add_i32 s3, s3, s4
-; VI-NEXT:    s_cmp_lt_i32 s1, 31
+; VI-NEXT:    s_add_i32 s3, s3, s5
+; VI-NEXT:    s_cmp_lt_i32 s4, 31
 ; VI-NEXT:    s_cselect_b32 s3, s3, 0x7c00
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_movk_i32 s0, 0x7e00
 ; VI-NEXT:    s_cselect_b32 s0, s0, 0x7c00
-; VI-NEXT:    s_cmpk_eq_i32 s1, 0x40f
+; VI-NEXT:    s_cmpk_eq_i32 s4, 0x40f
 ; VI-NEXT:    s_cselect_b32 s0, s0, s3
+; VI-NEXT:    s_lshr_b32 s1, s1, 16
+; VI-NEXT:    s_and_b32 s1, s1, 0x8000
+; VI-NEXT:    s_or_b32 s0, s1, s0
 ; VI-NEXT:    s_movk_i32 s1, 0x7fff
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
@@ -1157,39 +1220,42 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
 ; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; GFX9-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX9-NEXT:    s_bfe_u32 s1, s1, 0xb0014
+; GFX9-NEXT:    s_bfe_u32 s4, s1, 0xb0014
 ; GFX9-NEXT:    s_and_b32 s0, s0, 0xffe
 ; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX9-NEXT:    s_sub_i32 s4, 0x3f1, s1
+; GFX9-NEXT:    s_sub_i32 s5, 0x3f1, s4
 ; GFX9-NEXT:    s_or_b32 s0, s0, s3
-; GFX9-NEXT:    v_med3_i32 v0, s4, 0, 13
+; GFX9-NEXT:    v_med3_i32 v0, s5, 0, 13
 ; GFX9-NEXT:    s_or_b32 s3, s0, 0x1000
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX9-NEXT:    s_lshr_b32 s5, s3, s4
-; GFX9-NEXT:    s_lshl_b32 s4, s5, s4
-; GFX9-NEXT:    s_cmp_lg_u32 s4, s3
+; GFX9-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX9-NEXT:    s_lshr_b32 s6, s3, s5
+; GFX9-NEXT:    s_lshl_b32 s5, s6, s5
+; GFX9-NEXT:    s_cmp_lg_u32 s5, s3
 ; GFX9-NEXT:    s_cselect_b32 s3, 1, 0
-; GFX9-NEXT:    s_addk_i32 s1, 0xfc10
-; GFX9-NEXT:    s_lshl_b32 s4, s1, 12
-; GFX9-NEXT:    s_or_b32 s3, s5, s3
-; GFX9-NEXT:    s_or_b32 s4, s0, s4
-; GFX9-NEXT:    s_cmp_lt_i32 s1, 1
-; GFX9-NEXT:    s_cselect_b32 s3, s3, s4
-; GFX9-NEXT:    s_and_b32 s4, s3, 7
-; GFX9-NEXT:    s_cmp_gt_i32 s4, 5
+; GFX9-NEXT:    s_addk_i32 s4, 0xfc10
+; GFX9-NEXT:    s_lshl_b32 s5, s4, 12
+; GFX9-NEXT:    s_or_b32 s3, s6, s3
+; GFX9-NEXT:    s_or_b32 s5, s0, s5
+; GFX9-NEXT:    s_cmp_lt_i32 s4, 1
+; GFX9-NEXT:    s_cselect_b32 s3, s3, s5
+; GFX9-NEXT:    s_and_b32 s5, s3, 7
+; GFX9-NEXT:    s_cmp_gt_i32 s5, 5
+; GFX9-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s5, 3
 ; GFX9-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX9-NEXT:    s_cmp_eq_u32 s4, 3
-; GFX9-NEXT:    s_cselect_b32 s4, 1, 0
-; GFX9-NEXT:    s_or_b32 s4, s4, s5
+; GFX9-NEXT:    s_or_b32 s5, s5, s6
 ; GFX9-NEXT:    s_lshr_b32 s3, s3, 2
-; GFX9-NEXT:    s_add_i32 s3, s3, s4
-; GFX9-NEXT:    s_cmp_lt_i32 s1, 31
+; GFX9-NEXT:    s_add_i32 s3, s3, s5
+; GFX9-NEXT:    s_cmp_lt_i32 s4, 31
 ; GFX9-NEXT:    s_cselect_b32 s3, s3, 0x7c00
 ; GFX9-NEXT:    s_cmp_lg_u32 s0, 0
 ; GFX9-NEXT:    s_movk_i32 s0, 0x7e00
 ; GFX9-NEXT:    s_cselect_b32 s0, s0, 0x7c00
-; GFX9-NEXT:    s_cmpk_eq_i32 s1, 0x40f
+; GFX9-NEXT:    s_cmpk_eq_i32 s4, 0x40f
 ; GFX9-NEXT:    s_cselect_b32 s0, s0, s3
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 16
+; GFX9-NEXT:    s_and_b32 s1, s1, 0x8000
+; GFX9-NEXT:    s_or_b32 s0, s1, s0
 ; GFX9-NEXT:    s_movk_i32 s1, 0x7fff
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
@@ -1204,47 +1270,51 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
 ; GFX11-TRUE16-NEXT:    s_or_b32 s0, s3, s0
 ; GFX11-TRUE16-NEXT:    s_cmp_lg_u32 s0, 0
 ; GFX11-TRUE16-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT:    s_lshr_b32 s4, s1, 8
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-TRUE16-NEXT:    s_bfe_u32 s0, s1, 0xb0014
-; GFX11-TRUE16-NEXT:    s_lshr_b32 s1, s1, 8
+; GFX11-TRUE16-NEXT:    s_and_b32 s4, s4, 0xffe
 ; GFX11-TRUE16-NEXT:    s_sub_i32 s3, 0x3f1, s0
-; GFX11-TRUE16-NEXT:    s_and_b32 s1, s1, 0xffe
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT:    v_med3_i32 v1, s3, 0, 13
 ; GFX11-TRUE16-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX11-TRUE16-NEXT:    s_or_b32 s1, s1, s3
-; GFX11-TRUE16-NEXT:    s_or_b32 s3, s1, 0x1000
+; GFX11-TRUE16-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX11-TRUE16-NEXT:    s_or_b32 s3, s4, s3
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT:    s_or_b32 s4, s3, 0x1000
+; GFX11-TRUE16-NEXT:    s_lshr_b32 s6, s4, s5
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT:    s_lshr_b32 s5, s3, s4
-; GFX11-TRUE16-NEXT:    s_lshl_b32 s4, s5, s4
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT:    s_cmp_lg_u32 s4, s3
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX11-TRUE16-NEXT:    s_lshl_b32 s5, s6, s5
+; GFX11-TRUE16-NEXT:    s_cmp_lg_u32 s5, s4
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s4, 1, 0
 ; GFX11-TRUE16-NEXT:    s_addk_i32 s0, 0xfc10
-; GFX11-TRUE16-NEXT:    s_or_b32 s3, s5, s3
-; GFX11-TRUE16-NEXT:    s_lshl_b32 s4, s0, 12
-; GFX11-TRUE16-NEXT:    s_or_b32 s4, s1, s4
+; GFX11-TRUE16-NEXT:    s_or_b32 s4, s6, s4
+; GFX11-TRUE16-NEXT:    s_lshl_b32 s5, s0, 12
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT:    s_or_b32 s5, s3, s5
 ; GFX11-TRUE16-NEXT:    s_cmp_lt_i32 s0, 1
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s3, s3, s4
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT:    s_and_b32 s4, s3, 7
-; GFX11-TRUE16-NEXT:    s_cmp_gt_i32 s4, 5
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT:    s_and_b32 s5, s4, 7
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT:    s_cmp_gt_i32 s5, 5
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX11-TRUE16-NEXT:    s_cmp_eq_u32 s5, 3
 ; GFX11-TRUE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-TRUE16-NEXT:    s_cmp_eq_u32 s4, 3
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s4, 1, 0
-; GFX11-TRUE16-NEXT:    s_lshr_b32 s3, s3, 2
-; GFX11-TRUE16-NEXT:    s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT:    s_lshr_b32 s4, s4, 2
+; GFX11-TRUE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT:    s_add_i32 s3, s3, s4
+; GFX11-TRUE16-NEXT:    s_add_i32 s4, s4, s5
 ; GFX11-TRUE16-NEXT:    s_cmp_lt_i32 s0, 31
-; GFX11-TRUE16-NEXT:    s_movk_i32 s4, 0x7e00
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s3, s3, 0x7c00
-; GFX11-TRUE16-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s1, s4, 0x7c00
+; GFX11-TRUE16-NEXT:    s_movk_i32 s5, 0x7e00
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s4, s4, 0x7c00
+; GFX11-TRUE16-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s3, s5, 0x7c00
 ; GFX11-TRUE16-NEXT:    s_cmpk_eq_i32 s0, 0x40f
-; GFX11-TRUE16-NEXT:    s_cselect_b32 s0, s1, s3
+; GFX11-TRUE16-NEXT:    s_cselect_b32 s0, s3, s4
+; GFX11-TRUE16-NEXT:    s_lshr_b32 s1, s1, 16
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT:    s_and_b32 s1, s1, 0x8000
+; GFX11-TRUE16-NEXT:    s_or_b32 s0, s1, s0
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-TRUE16-NEXT:    v_bfi_b32 v0, 0x7fff, v0, s2
@@ -1259,48 +1329,52 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
 ; GFX11-FAKE16-NEXT:    s_or_b32 s0, s3, s0
 ; GFX11-FAKE16-NEXT:    s_cmp_lg_u32 s0, 0
 ; GFX11-FAKE16-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT:    s_lshr_b32 s4, s1, 8
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
 ; GFX11-FAKE16-NEXT:    s_bfe_u32 s0, s1, 0xb0014
-; GFX11-FAKE16-NEXT:    s_lshr_b32 s1, s1, 8
+; GFX11-FAKE16-NEXT:    s_and_b32 s4, s4, 0xffe
 ; GFX11-FAKE16-NEXT:    s_sub_i32 s3, 0x3f1, s0
-; GFX11-FAKE16-NEXT:    s_and_b32 s1, s1, 0xffe
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-FAKE16-NEXT:    v_med3_i32 v1, s3, 0, 13
 ; GFX11-FAKE16-NEXT:    v_readfirstlane_b32 s3, v0
 ; GFX11-FAKE16-NEXT:    v_mov_b32_e32 v0, s2
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX11-FAKE16-NEXT:    s_or_b32 s1, s1, s3
-; GFX11-FAKE16-NEXT:    s_or_b32 s3, s1, 0x1000
+; GFX11-FAKE16-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX11-FAKE16-NEXT:    s_or_b32 s3, s4, s3
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT:    s_or_b32 s4, s3, 0x1000
+; GFX11-FAKE16-NEXT:    s_lshr_b32 s6, s4, s5
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    s_lshr_b32 s5, s3, s4
-; GFX11-FAKE16-NEXT:    s_lshl_b32 s4, s5, s4
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    s_cmp_lg_u32 s4, s3
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s3, 1, 0
+; GFX11-FAKE16-NEXT:    s_lshl_b32 s5, s6, s5
+; GFX11-FAKE16-NEXT:    s_cmp_lg_u32 s5, s4
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s4, 1, 0
 ; GFX11-FAKE16-NEXT:    s_addk_i32 s0, 0xfc10
-; GFX11-FAKE16-NEXT:    s_or_b32 s3, s5, s3
-; GFX11-FAKE16-NEXT:    s_lshl_b32 s4, s0, 12
-; GFX11-FAKE16-NEXT:    s_or_b32 s4, s1, s4
+; GFX11-FAKE16-NEXT:    s_or_b32 s4, s6, s4
+; GFX11-FAKE16-NEXT:    s_lshl_b32 s5, s0, 12
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT:    s_or_b32 s5, s3, s5
 ; GFX11-FAKE16-NEXT:    s_cmp_lt_i32 s0, 1
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s3, s3, s4
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    s_and_b32 s4, s3, 7
-; GFX11-FAKE16-NEXT:    s_cmp_gt_i32 s4, 5
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT:    s_and_b32 s5, s4, 7
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT:    s_cmp_gt_i32 s5, 5
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX11-FAKE16-NEXT:    s_cmp_eq_u32 s5, 3
 ; GFX11-FAKE16-NEXT:    s_cselect_b32 s5, 1, 0
-; GFX11-FAKE16-NEXT:    s_cmp_eq_u32 s4, 3
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s4, 1, 0
-; GFX11-FAKE16-NEXT:    s_lshr_b32 s3, s3, 2
-; GFX11-FAKE16-NEXT:    s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT:    s_lshr_b32 s4, s4, 2
+; GFX11-FAKE16-NEXT:    s_or_b32 s5, s5, s6
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT:    s_add_i32 s3, s3, s4
+; GFX11-FAKE16-NEXT:    s_add_i32 s4, s4, s5
 ; GFX11-FAKE16-NEXT:    s_cmp_lt_i32 s0, 31
-; GFX11-FAKE16-NEXT:    s_movk_i32 s4, 0x7e00
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s3, s3, 0x7c00
-; GFX11-FAKE16-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s1, s4, 0x7c00
+; GFX11-FAKE16-NEXT:    s_movk_i32 s5, 0x7e00
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s4, s4, 0x7c00
+; GFX11-FAKE16-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s3, s5, 0x7c00
 ; GFX11-FAKE16-NEXT:    s_cmpk_eq_i32 s0, 0x40f
-; GFX11-FAKE16-NEXT:    s_cselect_b32 s0, s1, s3
+; GFX11-FAKE16-NEXT:    s_cselect_b32 s0, s3, s4
+; GFX11-FAKE16-NEXT:    s_lshr_b32 s1, s1, 16
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT:    s_and_b32 s1, s1, 0x8000
+; GFX11-FAKE16-NEXT:    s_or_b32 s0, s1, s0
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-FAKE16-NEXT:    v_bfi_b32 v0, 0x7fff, s0, v0
 ; GFX11-FAKE16-NEXT:    v_readfirstlane_b32 s0, v0
@@ -3187,78 +3261,81 @@ define <2 x half> @v_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    v_and_b32_e32 v5, 0xffe, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v3, v3, 20, 11
+; VI-NEXT:    v_bfe_u32 v6, v3, 20, 11
 ; VI-NEXT:    s_movk_i32 s4, 0x3f1
 ; VI-NEXT:    v_or_b32_e32 v2, v5, v2
-; VI-NEXT:    v_sub_u32_e32 v6, vcc, s4, v3
+; VI-NEXT:    v_sub_u32_e32 v7, vcc, s4, v6
 ; VI-NEXT:    v_or_b32_e32 v5, 0x1000, v2
-; VI-NEXT:    v_med3_i32 v6, v6, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v7, v6, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v6, v6, v7
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v6, v5
+; VI-NEXT:    v_med3_i32 v7, v7, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v8, v7, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v7, v7, v8
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v7, v5
 ; VI-NEXT:    s_movk_i32 s5, 0xfc10
 ; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v3, vcc, s5, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 12, v3
-; VI-NEXT:    v_or_b32_e32 v5, v7, v5
-; VI-NEXT:    v_or_b32_e32 v6, v2, v6
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v3
-; VI-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
-; VI-NEXT:    v_and_b32_e32 v6, 7, v5
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v6
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s5, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v7, 12, v6
+; VI-NEXT:    v_or_b32_e32 v5, v8, v5
+; VI-NEXT:    v_or_b32_e32 v7, v2, v7
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v6
+; VI-NEXT:    v_cndmask_b32_e32 v5, v7, v5, vcc
+; VI-NEXT:    v_and_b32_e32 v7, 7, v5
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v7
+; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v7
 ; VI-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v6
-; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v6, v6, v7
+; VI-NEXT:    v_or_b32_e32 v7, v7, v8
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 2, v5
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v6
-; VI-NEXT:    v_mov_b32_e32 v6, 0x7c00
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v3
-; VI-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, 0x7e00
+; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v7
+; VI-NEXT:    v_mov_b32_e32 v7, 0x7c00
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v6
+; VI-NEXT:    v_cndmask_b32_e32 v5, v7, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v8, 0x7e00
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    s_movk_i32 s6, 0x40f
-; VI-NEXT:    v_cndmask_b32_e32 v2, v6, v7, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v3
+; VI-NEXT:    v_cndmask_b32_e32 v2, v7, v8, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v6
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
-; VI-NEXT:    v_and_b32_e32 v5, 0x1ff, v1
-; VI-NEXT:    v_or_b32_e32 v0, v5, v0
+; VI-NEXT:    v_mov_b32_e32 v5, 0x8000
+; VI-NEXT:    v_and_b32_e32 v6, 0x1ff, v1
+; VI-NEXT:    v_and_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v0, v6, v0
+; VI-NEXT:    v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 8, v1
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v3, 0xffe, v3
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v6, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v3, v0
-; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v1
+; VI-NEXT:    v_sub_u32_e32 v9, vcc, s4, v6
 ; VI-NEXT:    v_or_b32_e32 v3, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v5, v5, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v8, v5, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v5, v5, v8
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v3
+; VI-NEXT:    v_med3_i32 v9, v9, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v10, v9, v3
+; VI-NEXT:    v_lshlrev_b32_e32 v9, v9, v10
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v9, v3
 ; VI-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v5, 12, v1
-; VI-NEXT:    v_or_b32_e32 v3, v8, v3
-; VI-NEXT:    v_or_b32_e32 v5, v0, v5
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; VI-NEXT:    v_and_b32_e32 v5, 7, v3
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v5
-; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v5
-; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v5, v5, v8
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s5, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 12, v6
+; VI-NEXT:    v_or_b32_e32 v3, v10, v3
+; VI-NEXT:    v_or_b32_e32 v9, v0, v9
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v6
+; VI-NEXT:    v_cndmask_b32_e32 v3, v9, v3, vcc
+; VI-NEXT:    v_and_b32_e32 v9, 7, v3
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v9
+; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v9
+; VI-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v9, v9, v10
 ; VI-NEXT:    v_lshrrev_b32_e32 v3, 2, v3
-; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v5
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
-; VI-NEXT:    v_cndmask_b32_e32 v3, v6, v3, vcc
+; VI-NEXT:    v_add_u32_e32 v3, vcc, v3, v9
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v6
+; VI-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v7, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v7, v8, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v6
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v2
+; VI-NEXT:    v_and_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v4
 ; VI-NEXT:    s_setpc_b64 s[30:31]
@@ -4008,78 +4085,84 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
 ; VI-NEXT:    s_cmp_lg_u32 s2, 0
 ; VI-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
-; VI-NEXT:    s_bfe_u32 s3, s3, 0xb0014
+; VI-NEXT:    s_bfe_u32 s6, s3, 0xb0014
 ; VI-NEXT:    v_readfirstlane_b32 s2, v0
-; VI-NEXT:    s_sub_i32 s6, 0x3f1, s3
+; VI-NEXT:    s_sub_i32 s7, 0x3f1, s6
 ; VI-NEXT:    s_or_b32 s2, s5, s2
-; VI-NEXT:    v_med3_i32 v0, s6, 0, 13
+; VI-NEXT:    v_med3_i32 v0, s7, 0, 13
 ; VI-NEXT:    s_or_b32 s5, s2, 0x1000
-; VI-NEXT:    v_readfirstlane_b32 s6, v0
-; VI-NEXT:    s_lshr_b32 s7, s5, s6
-; VI-NEXT:    s_lshl_b32 s6, s7, s6
-; VI-NEXT:    s_cmp_lg_u32 s6, s5
+; VI-NEXT:    v_readfirstlane_b32 s7, v0
+; VI-NEXT:    s_lshr_b32 s8, s5, s7
+; VI-NEXT:    s_lshl_b32 s7, s8, s7
+; VI-NEXT:    s_cmp_lg_u32 s7, s5
 ; VI-NEXT:    s_cselect_b32 s5, 1, 0
-; VI-NEXT:    s_addk_i32 s3, 0xfc10
-; VI-NEXT:    s_lshl_b32 s6, s3, 12
-; VI-NEXT:    s_or_b32 s5, s7, s5
-; VI-NEXT:    s_or_b32 s6, s2, s6
-; VI-NEXT:    s_cmp_lt_i32 s3, 1
-; VI-NEXT:    s_cselect_b32 s5, s5, s6
-; VI-NEXT:    s_and_b32 s6, s5, 7
-; VI-NEXT:    s_cmp_gt_i32 s6, 5
+; VI-NEXT:    s_addk_i32 s6, 0xfc10
+; VI-NEXT:    s_lshl_b32 s7, s6, 12
+; VI-NEXT:    s_or_b32 s5, s8, s5
+; VI-NEXT:    s_or_b32 s7, s2, s7
+; VI-NEXT:    s_cmp_lt_i32 s6, 1
+; VI-NEXT:    s_cselect_b32 s5, s5, s7
+; VI-NEXT:    s_and_b32 s7, s5, 7
+; VI-NEXT:    s_cmp_gt_i32 s7, 5
+; VI-NEXT:    s_cselect_b32 s8, 1, 0
+; VI-NEXT:    s_cmp_eq_u32 s7, 3
 ; VI-NEXT:    s_cselect_b32 s7, 1, 0
-; VI-NEXT:    s_cmp_eq_u32 s6, 3
-; VI-NEXT:    s_cselect_b32 s6, 1, 0
-; VI-NEXT:    s_or_b32 s6, s6, s7
+; VI-NEXT:    s_or_b32 s7, s7, s8
 ; VI-NEXT:    s_lshr_b32 s5, s5, 2
-; VI-NEXT:    s_add_i32 s5, s5, s6
-; VI-NEXT:    s_cmp_lt_i32 s3, 31
+; VI-NEXT:    s_add_i32 s5, s5, s7
+; VI-NEXT:    s_cmp_lt_i32 s6, 31
 ; VI-NEXT:    s_cselect_b32 s5, s5, 0x7c00
 ; VI-NEXT:    s_cmp_lg_u32 s2, 0
-; VI-NEXT:    s_movk_i32 s6, 0x7e00
-; VI-NEXT:    s_cselect_b32 s2, s6, 0x7c00
-; VI-NEXT:    s_cmpk_eq_i32 s3, 0x40f
+; VI-NEXT:    s_movk_i32 s7, 0x7e00
+; VI-NEXT:    s_cselect_b32 s2, s7, 0x7c00
+; VI-NEXT:    s_cmpk_eq_i32 s6, 0x40f
 ; VI-NEXT:    s_cselect_b32 s2, s2, s5
+; VI-NEXT:    s_lshr_b32 s3, s3, 16
+; VI-NEXT:    s_and_b32 s3, s3, 0x8000
+; VI-NEXT:    s_or_b32 s2, s3, s2
 ; VI-NEXT:    s_lshl_b32 s5, s2, 16
 ; VI-NEXT:    s_lshr_b32 s2, s1, 8
-; VI-NEXT:    s_and_b32 s7, s2, 0xffe
+; VI-NEXT:    s_and_b32 s6, s2, 0xffe
 ; VI-NEXT:    s_and_b32 s2, s1, 0x1ff
 ; VI-NEXT:    s_or_b32 s0, s2, s0
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; VI-NEXT:    s_bfe_u32 s1, s1, 0xb0014
 ; VI-NEXT:    v_readfirstlane_b32 s0, v0
-; VI-NEXT:    s_sub_i32 s3, 0x3f1, s1
-; VI-NEXT:    s_or_b32 s0, s7, s0
-; VI-NEXT:    v_med3_i32 v0, s3, 0, 13
+; VI-NEXT:    s_bfe_u32 s3, s1, 0xb0014
+; VI-NEXT:    s_or_b32 s0, s6, s0
+; VI-NEXT:    s_sub_i32 s6, 0x3f1, s3
+; VI-NEXT:    v_med3_i32 v0, s6, 0, 13
 ; VI-NEXT:    s_or_b32 s2, s0, 0x1000
-; VI-NEXT:    v_readfirstlane_b32 s3, v0
-; VI-NEXT:    s_lshr_b32 s7, s2, s3
-; VI-NEXT:    s_lshl_b32 s3, s7, s3
-; VI-NEXT:    s_cmp_lg_u32 s3, s2
+; VI-NEXT:    v_readfirstlane_b32 s6, v0
+; VI-NEXT:    s_lshr_b32 s8, s2, s6
+; VI-NEXT:    s_lshl_b32 s6, s8, s6
+; VI-NEXT:    s_cmp_lg_u32 s6, s2
 ; VI-NEXT:    s_cselect_b32 s2, 1, 0
-; VI-NEXT:    s_addk_i32 s1, 0xfc10
-; VI-NEXT:    s_lshl_b32 s3, s1, 12
-; VI-NEXT:    s_or_b32 s2, s7, s2
-; VI-NEXT:    s_or_b32 s3, s0, s3
-; VI-NEXT:    s_cmp_lt_i32 s1, 1
-; VI-NEXT:    s_cselect_b32 s2, s2, s3
-; VI-NEXT:    s_and_b32 s3, s2, 7
-; VI-NEXT:    s_cmp_gt_i32 s3, 5
-; VI-NEXT:    s_cselect_b32 s7, 1, 0
-; VI-NEXT:    s_cmp_eq_u32 s3, 3
-; VI-NEXT:    s_cselect_b32 s3, 1, 0
-; VI-NEXT:    s_or_b32 s3, s3, s7
+; VI-NEXT:    s_addk_i32 s3, 0xfc10
+; VI-NEXT:    s_lshl_b32 s6, s3, 12
+; VI-NEXT:    s_or_b32 s2, s8, s2
+; VI-NEXT:    s_or_b32 s6, s0, s6
+; VI-NEXT:    s_cmp_lt_i32 s3, 1
+; VI-NEXT:    s_cselect_b32 s2, s2, s6
+; VI-NEXT:    s_and_b32 s6, s2, 7
+; VI-NEXT:    s_cmp_gt_i32 s6, 5
+; VI-NEXT:    s_cselect_b32 s8, 1, 0
+; VI-NEXT:    s_cmp_eq_u32 s6, 3
+; VI-NEXT:    s_cselect_b32 s6, 1, 0
+; VI-NEXT:    s_or_b32 s6, s6, s8
 ; VI-NEXT:    s_lshr_b32 s2, s2, 2
-; VI-NEXT:    s_add_i32 s2, s2, s3
-; VI-NEXT:    s_cmp_lt_i32 s1, 31
+; VI-NEXT:    s_add_i32 s2, s2, s6
+; VI-NEXT:    s_cmp_lt_i32 s3, 31
 ; VI-NEXT:    s_cselect_b32 s2, s2, 0x7c00
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
-; VI-NEXT:    s_cselect_b32 s0, s6, 0x7c00
-; VI-NEXT:    s_cmpk_eq_i32 s1, 0x40f
+; VI-NEXT:    s_cselect_b32 s0, s7, 0x7c00
+; VI-NEXT:    s_cmpk_eq_i32 s3, 0x40f
 ; VI-NEXT:    s_cselect_b32 s0, s0, s2
-; VI-NEXT:    s_and_b32 s0, s0, 0x7fff
+; VI-NEXT:    s_lshr_b32 s1, s1, 16
+; VI-NEXT:    s_and_b32 s1, s1, 0x8000
+; VI-NEXT:    s_or_b32 s0, s1, s0
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
 ; VI-NEXT:    s_or_b32 s0, s0, s5
 ; VI-NEXT:    s_mov_b32 s1, 0x7fff7fff
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
@@ -4873,113 +4956,118 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    v_and_b32_e32 v8, 0xffe, v8
 ; VI-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v5, v5, 20, 11
+; VI-NEXT:    v_bfe_u32 v9, v5, 20, 11
 ; VI-NEXT:    s_movk_i32 s4, 0x3f1
 ; VI-NEXT:    v_or_b32_e32 v4, v8, v4
-; VI-NEXT:    v_sub_u32_e32 v9, vcc, s4, v5
+; VI-NEXT:    v_sub_u32_e32 v10, vcc, s4, v9
 ; VI-NEXT:    v_or_b32_e32 v8, 0x1000, v4
-; VI-NEXT:    v_med3_i32 v9, v9, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v10, v9, v8
-; VI-NEXT:    v_lshlrev_b32_e32 v9, v9, v10
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v9, v8
+; VI-NEXT:    v_med3_i32 v10, v10, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v11, v10, v8
+; VI-NEXT:    v_lshlrev_b32_e32 v10, v10, v11
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v10, v8
 ; VI-NEXT:    s_movk_i32 s5, 0xfc10
 ; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v5, vcc, s5, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v9, 12, v5
-; VI-NEXT:    v_or_b32_e32 v8, v10, v8
-; VI-NEXT:    v_or_b32_e32 v9, v4, v9
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
-; VI-NEXT:    v_cndmask_b32_e32 v8, v9, v8, vcc
-; VI-NEXT:    v_and_b32_e32 v9, 7, v8
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v9
+; VI-NEXT:    v_add_u32_e32 v9, vcc, s5, v9
+; VI-NEXT:    v_lshlrev_b32_e32 v10, 12, v9
+; VI-NEXT:    v_or_b32_e32 v8, v11, v8
+; VI-NEXT:    v_or_b32_e32 v10, v4, v10
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v9
+; VI-NEXT:    v_cndmask_b32_e32 v8, v10, v8, vcc
+; VI-NEXT:    v_and_b32_e32 v10, 7, v8
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v10
+; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v10
 ; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v9
-; VI-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v9, v9, v10
+; VI-NEXT:    v_or_b32_e32 v10, v10, v11
 ; VI-NEXT:    v_lshrrev_b32_e32 v8, 2, v8
-; VI-NEXT:    v_add_u32_e32 v8, vcc, v8, v9
-; VI-NEXT:    v_mov_b32_e32 v9, 0x7c00
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
-; VI-NEXT:    v_cndmask_b32_e32 v8, v9, v8, vcc
-; VI-NEXT:    v_mov_b32_e32 v10, 0x7e00
+; VI-NEXT:    v_add_u32_e32 v8, vcc, v8, v10
+; VI-NEXT:    v_mov_b32_e32 v10, 0x7c00
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v9
+; VI-NEXT:    v_cndmask_b32_e32 v8, v10, v8, vcc
+; VI-NEXT:    v_mov_b32_e32 v11, 0x7e00
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    s_movk_i32 s6, 0x40f
-; VI-NEXT:    v_cndmask_b32_e32 v4, v9, v10, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
+; VI-NEXT:    v_cndmask_b32_e32 v4, v10, v11, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v9
 ; VI-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
-; VI-NEXT:    v_and_b32_e32 v8, 0x1ff, v1
-; VI-NEXT:    v_or_b32_e32 v0, v8, v0
+; VI-NEXT:    v_mov_b32_e32 v8, 0x8000
+; VI-NEXT:    v_and_b32_e32 v9, 0x1ff, v1
+; VI-NEXT:    v_and_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v0, v9, v0
+; VI-NEXT:    v_or_b32_e32 v4, v5, v4
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v5, 0xffe, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v9, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v5, v0
-; VI-NEXT:    v_sub_u32_e32 v8, vcc, s4, v1
+; VI-NEXT:    v_sub_u32_e32 v12, vcc, s4, v9
 ; VI-NEXT:    v_or_b32_e32 v5, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v8, v8, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v11, v8, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v8, v8, v11
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v8, v5
+; VI-NEXT:    v_med3_i32 v12, v12, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v13, v12, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v12, v12, v13
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v12, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v8, 12, v1
-; VI-NEXT:    v_or_b32_e32 v5, v11, v5
-; VI-NEXT:    v_or_b32_e32 v8, v0, v8
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v5, v8, v5, vcc
-; VI-NEXT:    v_and_b32_e32 v8, 7, v5
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v8
-; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v8
-; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v8, v8, v11
+; VI-NEXT:    v_add_u32_e32 v9, vcc, s5, v9
+; VI-NEXT:    v_lshlrev_b32_e32 v12, 12, v9
+; VI-NEXT:    v_or_b32_e32 v5, v13, v5
+; VI-NEXT:    v_or_b32_e32 v12, v0, v12
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v9
+; VI-NEXT:    v_cndmask_b32_e32 v5, v12, v5, vcc
+; VI-NEXT:    v_and_b32_e32 v12, 7, v5
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v12
+; VI-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v12
+; VI-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v12, v12, v13
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 2, v5
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v8
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
-; VI-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
+; VI-NEXT:    v_add_u32_e32 v5, vcc, v5, v12
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v9
+; VI-NEXT:    v_cndmask_b32_e32 v5, v10, v5, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v9, v10, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v10, v11, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v9
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; VI-NEXT:    v_and_b32_e32 v5, 0x1ff, v3
+; VI-NEXT:    v_and_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_or_b32_e32 v2, v5, v2
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    v_and_b32_e32 v1, 0xffe, v1
 ; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v3, v3, 20, 11
+; VI-NEXT:    v_bfe_u32 v5, v3, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v1, v1, v2
-; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v3
+; VI-NEXT:    v_sub_u32_e32 v9, vcc, s4, v5
 ; VI-NEXT:    v_or_b32_e32 v2, 0x1000, v1
-; VI-NEXT:    v_med3_i32 v5, v5, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v8, v5, v2
-; VI-NEXT:    v_lshlrev_b32_e32 v5, v5, v8
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v2
+; VI-NEXT:    v_med3_i32 v9, v9, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v12, v9, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v9, v9, v12
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v9, v2
 ; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v3, vcc, s5, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v5, 12, v3
-; VI-NEXT:    v_or_b32_e32 v2, v8, v2
-; VI-NEXT:    v_or_b32_e32 v5, v1, v5
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v3
-; VI-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
-; VI-NEXT:    v_and_b32_e32 v5, 7, v2
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v5
-; VI-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v5
-; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v5, v5, v8
-; VI-NEXT:    v_lshrrev_b32_e32 v2, 2, v2
-; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v5
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v3
+; VI-NEXT:    v_add_u32_e32 v5, vcc, s5, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v9, 12, v5
+; VI-NEXT:    v_or_b32_e32 v2, v12, v2
+; VI-NEXT:    v_or_b32_e32 v9, v1, v9
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v9, v2, vcc
+; VI-NEXT:    v_and_b32_e32 v9, 7, v2
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v9
+; VI-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v9
+; VI-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v9, v9, v12
+; VI-NEXT:    v_lshrrev_b32_e32 v2, 2, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v9
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
+; VI-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-NEXT:    v_cndmask_b32_e32 v1, v9, v10, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v3
+; VI-NEXT:    v_cndmask_b32_e32 v1, v10, v11, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    v_and_b32_sdwa v2, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v6
 ; VI-NEXT:    v_bfi_b32 v1, s4, v4, v7
@@ -4994,71 +5082,73 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 8, v5
 ; GFX9-NEXT:    s_movk_i32 s5, 0xffe
-; GFX9-NEXT:    v_bfe_u32 v5, v5, 20, 11
+; GFX9-NEXT:    v_bfe_u32 v9, v5, 20, 11
 ; GFX9-NEXT:    v_and_or_b32 v4, v8, s5, v4
-; GFX9-NEXT:    v_sub_u32_e32 v9, 0x3f1, v5
+; GFX9-NEXT:    v_sub_u32_e32 v10, 0x3f1, v9
 ; GFX9-NEXT:    v_or_b32_e32 v8, 0x1000, v4
-; GFX9-NEXT:    v_med3_i32 v9, v9, 0, 13
-; GFX9-NEXT:    v_lshrrev_b32_e32 v10, v9, v8
-; GFX9-NEXT:    v_lshlrev_b32_e32 v9, v9, v10
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v9, v8
+; GFX9-NEXT:    v_med3_i32 v10, v10, 0, 13
+; GFX9-NEXT:    v_lshrrev_b32_e32 v11, v10, v8
+; GFX9-NEXT:    v_lshlrev_b32_e32 v10, v10, v11
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v10, v8
 ; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT:    v_add_u32_e32 v5, 0xfffffc10, v5
-; GFX9-NEXT:    v_or_b32_e32 v8, v10, v8
-; GFX9-NEXT:    v_lshl_or_b32 v9, v5, 12, v4
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
-; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v8, vcc
-; GFX9-NEXT:    v_and_b32_e32 v9, 7, v8
-; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v9
+; GFX9-NEXT:    v_add_u32_e32 v9, 0xfffffc10, v9
+; GFX9-NEXT:    v_or_b32_e32 v8, v11, v8
+; GFX9-NEXT:    v_lshl_or_b32 v10, v9, 12, v4
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v9
+; GFX9-NEXT:    v_cndmask_b32_e32 v8, v10, v8, vcc
+; GFX9-NEXT:    v_and_b32_e32 v10, 7, v8
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v10
+; GFX9-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v10
 ; GFX9-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v9
-; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
-; GFX9-NEXT:    v_or_b32_e32 v9, v9, v10
+; GFX9-NEXT:    v_or_b32_e32 v10, v10, v11
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v8, 2, v8
-; GFX9-NEXT:    v_add_u32_e32 v8, v8, v9
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0x7c00
-; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
-; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v8, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v10, 0x7e00
+; GFX9-NEXT:    v_add_u32_e32 v8, v8, v10
+; GFX9-NEXT:    v_mov_b32_e32 v10, 0x7c00
+; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v9
+; GFX9-NEXT:    v_cndmask_b32_e32 v8, v10, v8, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0x7e00
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; GFX9-NEXT:    s_movk_i32 s6, 0x40f
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v9, v10, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v10, v11, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v9
 ; GFX9-NEXT:    v_and_or_b32 v0, v1, s4, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX9-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GFX9-NEXT:    s_mov_b32 s7, 0x8000
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    v_and_or_b32 v4, v5, s7, v4
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
 ; GFX9-NEXT:    v_bfe_u32 v8, v1, 20, 11
 ; GFX9-NEXT:    v_and_or_b32 v0, v5, s5, v0
-; GFX9-NEXT:    v_sub_u32_e32 v11, 0x3f1, v8
+; GFX9-NEXT:    v_sub_u32_e32 v9, 0x3f1, v8
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x1000, v0
-; GFX9-NEXT:    v_med3_i32 v11, v11, 0, 13
-; GFX9-NEXT:    v_lshrrev_b32_e32 v12, v11, v5
-; GFX9-NEXT:    v_lshlrev_b32_e32 v11, v11, v12
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v5
+; GFX9-NEXT:    v_med3_i32 v9, v9, 0, 13
+; GFX9-NEXT:    v_lshrrev_b32_e32 v12, v9, v5
+; GFX9-NEXT:    v_lshlrev_b32_e32 v9, v9, v12
+; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v9, v5
 ; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
 ; GFX9-NEXT:    v_add_u32_e32 v8, 0xfffffc10, v8
 ; GFX9-NEXT:    v_or_b32_e32 v5, v12, v5
-; GFX9-NEXT:    v_lshl_or_b32 v11, v8, 12, v0
+; GFX9-NEXT:    v_lshl_or_b32 v9, v8, 12, v0
 ; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v8
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v11, v5, vcc
-; GFX9-NEXT:    v_and_b32_e32 v11, 7, v5
-; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX9-NEXT:    v_and_b32_e32 v9, 7, v5
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v9
 ; GFX9-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
-; GFX9-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; GFX9-NEXT:    v_or_b32_e32 v11, v11, v12
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v9
+; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT:    v_or_b32_e32 v9, v9, v12
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v5, 2, v5
-; GFX9-NEXT:    v_add_u32_e32 v5, v5, v11
+; GFX9-NEXT:    v_add_u32_e32 v5, v5, v9
 ; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v8
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v10, v5, vcc
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v9, v10, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v10, v11, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GFX9-NEXT:    s_mov_b32 s7, 0x8000
 ; GFX9-NEXT:    v_and_or_b32 v0, v1, s7, v0
 ; GFX9-NEXT:    v_and_or_b32 v1, v3, s4, v2
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
@@ -5069,27 +5159,27 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; GFX9-NEXT:    v_sub_u32_e32 v8, 0x3f1, v5
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x1000, v1
 ; GFX9-NEXT:    v_med3_i32 v8, v8, 0, 13
-; GFX9-NEXT:    v_lshrrev_b32_e32 v11, v8, v2
-; GFX9-NEXT:    v_lshlrev_b32_e32 v8, v8, v11
+; GFX9-NEXT:    v_lshrrev_b32_e32 v9, v8, v2
+; GFX9-NEXT:    v_lshlrev_b32_e32 v8, v8, v9
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, v8, v2
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; GFX9-NEXT:    v_add_u32_e32 v5, 0xfffffc10, v5
-; GFX9-NEXT:    v_or_b32_e32 v2, v11, v2
+; GFX9-NEXT:    v_or_b32_e32 v2, v9, v2
 ; GFX9-NEXT:    v_lshl_or_b32 v8, v5, 12, v1
 ; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v8, v2, vcc
 ; GFX9-NEXT:    v_and_b32_e32 v8, 7, v2
 ; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v8
-; GFX9-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v8
 ; GFX9-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT:    v_or_b32_e32 v8, v8, v11
+; GFX9-NEXT:    v_or_b32_e32 v8, v8, v9
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 2, v2
 ; GFX9-NEXT:    v_add_u32_e32 v2, v2, v8
 ; GFX9-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc
 ; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v9, v10, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v10, v11, vcc
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v2, 16, v3
@@ -5105,247 +5195,255 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f64_sign_v3f16(<3 x double> %mag,
 ; GFX11-TRUE16:       ; %bb.0:
 ; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-TRUE16-NEXT:    v_and_or_b32 v4, 0x1ff, v5, v4
-; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0x1ff, v3, v2
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v8, 8, v5
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v9, 8, v3
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v10, v3, 20, 11
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v9, v5, 20, 11
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0x1ff, v3, v2
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v11, 8, v3
 ; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v5, v5, 20, 11
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v12, v3, 20, 11
+; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v10, 0x3f1, v9
 ; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0x1ff, v1, v0
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v13, 8, v1
-; GFX11-TRUE16-NEXT:    v_bfe_u32 v14, v1, 20, 11
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v14, 8, v1
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v11, 0x3f1, v5
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v16, 0x3f1, v12
+; GFX11-TRUE16-NEXT:    v_bfe_u32 v15, v1, 20, 11
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v12, 0xfffffc10, v12
 ; GFX11-TRUE16-NEXT:    v_and_or_b32 v4, 0xffe, v8, v4
+; GFX11-TRUE16-NEXT:    v_med3_i32 v8, v10, 0, 13
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v8, 0x3f1, v10
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v10, 0x1000, v4
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0xffe, v11, v2
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v13, v8, v10
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, 0x1000, v2
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v19, v12, 12, v2
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v8, v8, v13
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v8, v10
+; GFX11-TRUE16-NEXT:    v_med3_i32 v10, v16, 0, 13
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-TRUE16-NEXT:    v_med3_i32 v11, v11, 0, 13
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v10, 0xfffffc10, v10
-; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0xffe, v9, v2
-; GFX11-TRUE16-NEXT:    v_med3_i32 v8, v8, 0, 13
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, 0x1000, v4
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v13, v8
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, 0x1000, v2
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v16, v11, v9
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0xffe, v13, v0
-; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v13, 0x3f1, v14
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v15, v8, v12
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v14, 0xfffffc10, v14
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v11, v11, v16
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v17, 0x1000, v0
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v9, 0xfffffc10, v9
+; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v13, 0x3f1, v15
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0xffe, v14, v0
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v14, v10, v11
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v16, v9, 12, v4
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v9
 ; GFX11-TRUE16-NEXT:    v_med3_i32 v13, v13, 0, 13
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v8, v8, v15
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v8, v12
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v12, v13, v17
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v11, v9
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v10, 12, v2
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v13, v13, v12
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v15, v8
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v10
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v5, 0xfffffc10, v5
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v9, v16, v9
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v8, v11, v8, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v13, v17
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v11, v5, 12, v4
-; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v17, 0x7e00
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 7, v8
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v5
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v10, v10, v14
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v8, v16, v8, vcc_lo
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v16, 0x1000, v0
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v10, v11
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v18, v13, v16
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v13, v13, v18
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v10, v14, v10
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v17, 7, v8
 ; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v8, 2, v8
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v9, v11, v9, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v15
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v12, v13
-; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v12, v14, 12, v0
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v15
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v14
+; GFX11-TRUE16-NEXT:    v_mov_b32_e32 v14, 0x7e00
+; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v17
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v17
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v17, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v12
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v15, v13
-; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v11, v12, v11 :: v_dual_and_b32 v12, 7, v9
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v9, 2, v9
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v8, v8, v13
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 7, v11
-; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v12
-; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v11, 2, v11
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v16, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v15
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v11, v17, v11
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v10, v19, v10, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v13, v16
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v8, v8, v11
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v9
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v18, v13
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v8, 0x7c00, v8, vcc_lo
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v11, 0xfffffc10, v15
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v15, 7, v10
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v10, 2, v10
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_lshl_or_b32 v16, v11, 12, v0
+; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v15
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v17, 0, 1, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v15
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v13, v15, v13
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v10
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v11, v11, v13
-; GFX11-TRUE16-NEXT:    v_or_b32_e32 v12, v12, v16
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v8, 0x7c00, v8, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v11
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v15, v15, v17
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v13, v16, v13, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v4, 0x7c00, v14, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v9
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc_lo
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v8, v10, v15
+; GFX11-TRUE16-NEXT:    v_and_b32_e32 v16, 7, v13
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v15.l, v5.h
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v15.h, 0
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v16
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v4, 0x8000, v15, v4
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v15.l, v3.h
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v12
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v5, 0x7c00, v8, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT:    v_or_b32_e32 v8, v10, v9
+; GFX11-TRUE16-NEXT:    v_lshrrev_b32_e32 v9, 2, v13
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v2, 0x7c00, v14, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v12
+; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v8, v9, v8
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_dual_cndmask_b32 v2, 0x7c00, v17 :: v_dual_add_nc_u32 v9, v9, v12
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v10
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v2, v8, v2, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v14
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v8.l, v3.h
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v8.h, 0
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v11, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v11
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0x8000, v15, v2
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v8, vcc_lo
 ; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_and_or_b32 v2, 0x8000, v8, v2
-; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v8.l, v1.h
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v17, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v14
+; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v15.l, v1.h
+; GFX11-TRUE16-NEXT:    v_bfi_b32 v1, 0x7fff7fff, v4, v7
+; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v14, vcc_lo
+; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v11
+; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v5
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0x8000, v8, v0
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v1, 0x7c00, v9, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-TRUE16-NEXT:    v_and_or_b32 v0, 0x8000, v15, v0
 ; GFX11-TRUE16-NEXT:    v_mov_b16_e32 v0.h, v2.l
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v3, 0x7c00, v17, vcc_lo
-; GFX11-TRUE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v5
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT:    v_bfi_b32 v0, 0x7fff7fff, v0, v6
-; GFX11-TRUE16-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
 ; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT:    v_bfi_b32 v1, 0x7fff7fff, v1, v7
+; GFX11-TRUE16-NEXT:    v_bfi_b32 v0, 0x7fff7fff, v0, v6
 ; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-FAKE16-LABEL: v_copysign_out_v3f16_mag_v3f64_sign_v3f16:
 ; GFX11-FAKE16:       ; %bb.0:
 ; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-FAKE16-NEXT:    v_and_or_b32 v4, 0x1ff, v5, v4
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v8, 8, v5
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v9, v5, 20, 11
 ; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0x1ff, v1, v0
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v8, v5, 20, 11
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 8, v5
 ; GFX11-FAKE16-NEXT:    v_and_or_b32 v2, 0x1ff, v3, v2
 ; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v10, v1, 20, 11
-; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v9, 0x3f1, v8
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v12, 8, v3
-; GFX11-FAKE16-NEXT:    v_bfe_u32 v13, v3, 20, 11
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v11, v1, 20, 11
+; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v10, 0x3f1, v9
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v13, 8, v3
+; GFX11-FAKE16-NEXT:    v_bfe_u32 v14, v3, 20, 11
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-FAKE16-NEXT:    v_med3_i32 v9, v9, 0, 13
+; GFX11-FAKE16-NEXT:    v_med3_i32 v10, v10, 0, 13
+; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v16, 0x3f1, v11
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_and_or_b32 v4, 0xffe, v5, v4
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v4, 0xffe, v8, v4
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v8, 8, v1
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
 ; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT:    v_med3_i32 v16, v16, 0, 13
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, 0x1000, v4
 ; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, 0x1000, v4
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0xffe, v5, v0
-; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v5, 0x3f1, v10
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0xffe, v8, v0
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v15, v9, v11
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, 0x1000, v0
-; GFX11-FAKE16-NEXT:    v_med3_i32 v5, v5, 0, 13
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_and_or_b32 v2, 0xffe, v12, v2
-; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v12, 0x3f1, v13
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v9, v9, v15
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v13, 0xfffffc10, v13
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v16, v5, v14
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v17, 0x1000, v2
-; GFX11-FAKE16-NEXT:    v_med3_i32 v12, v12, 0, 13
-; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v9, v11
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v5, v5, v16
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v11, v12, v17
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v15, v10, v12
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v17, 0x1000, v0
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v5, v14
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v12, v12, v11
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v9, v15, v9
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v8, 0xfffffc10, v8
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v12, v17
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v2, 0xffe, v13, v2
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v8, v10, v15
+; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v10, 0x3f1, v14
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v8, 12, v4
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v5, v16, v5
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v10, 0xfffffc10, v10
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v8
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v13, 0x1000, v2
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v8, v12
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v12, v16, v17
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_med3_i32 v10, v10, 0, 13
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v16, v16, v12
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v10, 12, v0
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v11, v11, v12
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v9, v14, v9, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v10
-; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v12, v13, 12, v2
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, v15, v5, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v13
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v14, 7, v9
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v9, 2, v9
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, v12, v11, vcc_lo
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v14
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v14
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v18, v10, v13
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v8, v15, v8
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v9, 0xfffffc10, v9
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v10, v10, v18
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v15, v9, 12, v4
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v9
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, v15, v8, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v16, v17
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 7, v8
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v10, v13
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v13, 0xfffffc10, v14
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v8, 2, v8
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v12, v15
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v11, 0xfffffc10, v11
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v16
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v17, v13, 12, v2
+; GFX11-FAKE16-NEXT:    v_lshl_or_b32 v14, v11, 12, v0
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v10, v18, v10
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v11
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, v14, v12, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 7, v12
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v12, v14, v12
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v12, 0x7e00 :: v_dual_add_nc_u32 v9, v9, v12
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v15, 7, v5
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v16, 7, v11
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v5, 2, v5
-; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v11, 2, v11
-; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v15
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 1, v13
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v12, 2, v12
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v14, v15
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, v17, v10, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v16
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v8, v8, v14
+; GFX11-FAKE16-NEXT:    v_dual_mov_b32 v14, 0x7e00 :: v_dual_and_b32 v15, 7, v10
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v17, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v10, 2, v10
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v16, 0, 1, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v15
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v16, v16, v17
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v18, 0, 1, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v15
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v12, v12, v16
 ; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 5, v16
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v9
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v14, v15, v17
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v18, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v5, v5, v14
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v16, 0, 1, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v10
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v16, v18
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, 0x7c00, v5, vcc_lo
+; GFX11-FAKE16-NEXT:    v_or_b32_e32 v15, v15, v18
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v8, 0x7c00, v8, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v10, v10, v15
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, 0x7c00, v14, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v11
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v12, 0x7c00, v12, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_dual_cndmask_b32 v0, 0x7c00, v12 :: v_dual_add_nc_u32 v11, v11, v15
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, 0x7c00, v14, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v13
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v11, 0x7c00, v11, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v10, 0x7c00, v10, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, 0x7c00, v12, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v10
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, 0x7c00, v14, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v11
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v0, v12, v0, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v13
 ; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_and_or_b32 v0, 0x8000, v1, v0
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v11, v2, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 31, v8
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, v10, v2, vcc_lo
+; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v9
 ; GFX11-FAKE16-NEXT:    v_and_or_b32 v1, 0x8000, v3, v2
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v5, 0x7c00, v9, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc_lo
+; GFX11-FAKE16-NEXT:    v_lshrrev_b32_e32 v2, 16, v5
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v2, 0x7c00, v12, vcc_lo
-; GFX11-FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x40f, v8
+; GFX11-FAKE16-NEXT:    v_and_or_b32 v1, 0x8000, v2, v4
+; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-FAKE16-NEXT:    v_bfi_b32 v0, 0x7fff7fff, v0, v6
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc_lo
 ; GFX11-FAKE16-NEXT:    v_bfi_b32 v1, 0x7fff7fff, v1, v7
 ; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %mag.trunc = fptrunc <3 x double> %mag to <3 x half>
@@ -6054,152 +6152,157 @@ define <4 x half> @v_copysign_out_v4f16_mag_v4f64_sign_v4f16(<4 x double> %mag,
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    v_and_b32_e32 v10, 0xffe, v10
 ; VI-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v5, v5, 20, 11
+; VI-NEXT:    v_bfe_u32 v11, v5, 20, 11
 ; VI-NEXT:    s_movk_i32 s4, 0x3f1
 ; VI-NEXT:    v_or_b32_e32 v4, v10, v4
-; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v5
+; VI-NEXT:    v_sub_u32_e32 v12, vcc, s4, v11
 ; VI-NEXT:    v_or_b32_e32 v10, 0x1000, v4
-; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v12, v11, v10
-; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v12
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v10
+; VI-NEXT:    v_med3_i32 v12, v12, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v13, v12, v10
+; VI-NEXT:    v_lshlrev_b32_e32 v12, v12, v13
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v12, v10
 ; VI-NEXT:    s_movk_i32 s5, 0xfc10
 ; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v5, vcc, s5, v5
-; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v5
-; VI-NEXT:    v_or_b32_e32 v10, v12, v10
-; VI-NEXT:    v_or_b32_e32 v11, v4, v11
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v5
-; VI-NEXT:    v_cndmask_b32_e32 v10, v11, v10, vcc
-; VI-NEXT:    v_and_b32_e32 v11, 7, v10
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
+; VI-NEXT:    v_add_u32_e32 v11, vcc, s5, v11
+; VI-NEXT:    v_lshlrev_b32_e32 v12, 12, v11
+; VI-NEXT:    v_or_b32_e32 v10, v13, v10
+; VI-NEXT:    v_or_b32_e32 v12, v4, v12
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v11
+; VI-NEXT:    v_cndmask_b32_e32 v10, v12, v10, vcc
+; VI-NEXT:    v_and_b32_e32 v12, 7, v10
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v12
+; VI-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v12
 ; VI-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
-; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v11, v11, v12
+; VI-NEXT:    v_or_b32_e32 v12, v12, v13
 ; VI-NEXT:    v_lshrrev_b32_e32 v10, 2, v10
-; VI-NEXT:    v_add_u32_e32 v10, vcc, v10, v11
-; VI-NEXT:    v_mov_b32_e32 v11, 0x7c00
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v5
-; VI-NEXT:    v_cndmask_b32_e32 v10, v11, v10, vcc
-; VI-NEXT:    v_mov_b32_e32 v12, 0x7e00
+; VI-NEXT:    v_add_u32_e32 v10, vcc, v10, v12
+; VI-NEXT:    v_mov_b32_e32 v12, 0x7c00
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v11
+; VI-NEXT:    v_cndmask_b32_e32 v10, v12, v10, vcc
+; VI-NEXT:    v_mov_b32_e32 v13, 0x7e00
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; VI-NEXT:    s_movk_i32 s6, 0x40f
-; VI-NEXT:    v_cndmask_b32_e32 v4, v11, v12, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v5
+; VI-NEXT:    v_cndmask_b32_e32 v4, v12, v13, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v11
 ; VI-NEXT:    v_cndmask_b32_e32 v4, v10, v4, vcc
-; VI-NEXT:    v_and_b32_e32 v10, 0x1ff, v7
-; VI-NEXT:    v_or_b32_e32 v6, v10, v6
+; VI-NEXT:    v_mov_b32_e32 v10, 0x8000
+; VI-NEXT:    v_and_b32_e32 v11, 0x1ff, v7
+; VI-NEXT:    v_and_b32_sdwa v5, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v6, v11, v6
+; VI-NEXT:    v_or_b32_e32 v4, v5, v4
 ; VI-NEXT:    v_lshrrev_b32_e32 v5, 8, v7
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
 ; VI-NEXT:    v_and_b32_e32 v5, 0xffe, v5
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v7, v7, 20, 11
+; VI-NEXT:    v_bfe_u32 v11, v7, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v5, v5, v6
-; VI-NEXT:    v_sub_u32_e32 v10, vcc, s4, v7
+; VI-NEXT:    v_sub_u32_e32 v14, vcc, s4, v11
 ; VI-NEXT:    v_or_b32_e32 v6, 0x1000, v5
-; VI-NEXT:    v_med3_i32 v10, v10, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v13, v10, v6
-; VI-NEXT:    v_lshlrev_b32_e32 v10, v10, v13
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v10, v6
+; VI-NEXT:    v_med3_i32 v14, v14, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v15, v14, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v14, v14, v15
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v14, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v7, vcc, s5, v7
-; VI-NEXT:    v_lshlrev_b32_e32 v10, 12, v7
-; VI-NEXT:    v_or_b32_e32 v6, v13, v6
-; VI-NEXT:    v_or_b32_e32 v10, v5, v10
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v10, v6, vcc
-; VI-NEXT:    v_and_b32_e32 v10, 7, v6
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v10
-; VI-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v10
-; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v10, v10, v13
+; VI-NEXT:    v_add_u32_e32 v11, vcc, s5, v11
+; VI-NEXT:    v_lshlrev_b32_e32 v14, 12, v11
+; VI-NEXT:    v_or_b32_e32 v6, v15, v6
+; VI-NEXT:    v_or_b32_e32 v14, v5, v14
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v11
+; VI-NEXT:    v_cndmask_b32_e32 v6, v14, v6, vcc
+; VI-NEXT:    v_and_b32_e32 v14, 7, v6
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v14
+; VI-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v14
+; VI-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v14, v14, v15
 ; VI-NEXT:    v_lshrrev_b32_e32 v6, 2, v6
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v10
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v7
-; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v14
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v11
+; VI-NEXT:    v_cndmask_b32_e32 v6, v12, v6, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; VI-NEXT:    v_cndmask_b32_e32 v5, v11, v12, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
+; VI-NEXT:    v_cndmask_b32_e32 v5, v12, v13, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v11
+; VI-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
+; VI-NEXT:    v_and_b32_sdwa v6, v7, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_and_b32_e32 v7, 0x1ff, v1
 ; VI-NEXT:    v_or_b32_e32 v0, v7, v0
-; VI-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
+; VI-NEXT:    v_or_b32_sdwa v5, v6, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; VI-NEXT:    v_lshrrev_b32_e32 v6, 8, v1
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; VI-NEXT:    v_and_b32_e32 v6, 0xffe, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v1, v1, 20, 11
+; VI-NEXT:    v_bfe_u32 v7, v1, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v0, v6, v0
-; VI-NEXT:    v_sub_u32_e32 v7, vcc, s4, v1
+; VI-NEXT:    v_sub_u32_e32 v11, vcc, s4, v7
 ; VI-NEXT:    v_or_b32_e32 v6, 0x1000, v0
-; VI-NEXT:    v_med3_i32 v7, v7, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v10, v7, v6
-; VI-NEXT:    v_lshlrev_b32_e32 v7, v7, v10
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v7, v6
+; VI-NEXT:    v_med3_i32 v11, v11, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v14, v11, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v11, v11, v14
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v11, v6
 ; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s5, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v7, 12, v1
-; VI-NEXT:    v_or_b32_e32 v6, v10, v6
-; VI-NEXT:    v_or_b32_e32 v7, v0, v7
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v6, v7, v6, vcc
-; VI-NEXT:    v_and_b32_e32 v7, 7, v6
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v7
-; VI-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v7
-; VI-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v7, v7, v10
-; VI-NEXT:    v_lshrrev_b32_e32 v6, 2, v6
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v7
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v1
+; VI-NEXT:    v_add_u32_e32 v7, vcc, s5, v7
+; VI-NEXT:    v_lshlrev_b32_e32 v11, 12, v7
+; VI-NEXT:    v_or_b32_e32 v6, v14, v6
+; VI-NEXT:    v_or_b32_e32 v11, v0, v11
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v7
 ; VI-NEXT:    v_cndmask_b32_e32 v6, v11, v6, vcc
+; VI-NEXT:    v_and_b32_e32 v11, 7, v6
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v11
+; VI-NEXT:    v_cndmask_b32_e64 v14, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v11
+; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v11, v11, v14
+; VI-NEXT:    v_lshrrev_b32_e32 v6, 2, v6
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v11
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v7
+; VI-NEXT:    v_cndmask_b32_e32 v6, v12, v6, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v11, v12, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v12, v13, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v7
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
 ; VI-NEXT:    v_and_b32_e32 v6, 0x1ff, v3
+; VI-NEXT:    v_and_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; VI-NEXT:    v_or_b32_e32 v2, v6, v2
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; VI-NEXT:    v_and_b32_e32 v1, 0xffe, v1
 ; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_bfe_u32 v3, v3, 20, 11
+; VI-NEXT:    v_bfe_u32 v6, v3, 20, 11
 ; VI-NEXT:    v_or_b32_e32 v1, v1, v2
-; VI-NEXT:    v_sub_u32_e32 v6, vcc, s4, v3
+; VI-NEXT:    v_sub_u32_e32 v7, vcc, s4, v6
 ; VI-NEXT:    v_or_b32_e32 v2, 0x1000, v1
-; VI-NEXT:    v_med3_i32 v6, v6, 0, 13
-; VI-NEXT:    v_lshrrev_b32_e32 v7, v6, v2
-; VI-NEXT:    v_lshlrev_b32_e32 v6, v6, v7
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v6, v2
+; VI-NEXT:    v_med3_i32 v7, v7, 0, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v11, v7, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v7, v7, v11
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, v7, v2
 ; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v3, vcc, s5, v3
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 12, v3
-; VI-NEXT:    v_or_b32_e32 v2, v7, v2
-; VI-NEXT:    v_or_b32_e32 v6, v1, v6
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v3
-; VI-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
-; VI-NEXT:    v_and_b32_e32 v6, 7, v2
-; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v6
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s5, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v7, 12, v6
+; VI-NEXT:    v_or_b32_e32 v2, v11, v2
+; VI-NEXT:    v_or_b32_e32 v7, v1, v7
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v6
+; VI-NEXT:    v_cndmask_b32_e32 v2, v7, v2, vcc
+; VI-NEXT:    v_and_b32_e32 v7, 7, v2
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 5, v7
+; VI-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v7
 ; VI-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v6
-; VI-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; VI-NEXT:    v_or_b32_e32 v6, v6, v7
+; VI-NEXT:    v_or_b32_e32 v7, v7, v11
 ; VI-NEXT:    v_lshrrev_b32_e32 v2, 2, v2
-; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v6
-; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v3
-; VI-NEXT:    v_cndmask_b32_e32 v2, v11, v2, vcc
+; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v7
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 31, v6
+; VI-NEXT:    v_cndmask_b32_e32 v2, v12, v2, vcc
 ; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-NEXT:    v_cndmask_b32_e32 v1, v11, v12, vcc
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v3
+; VI-NEXT:    v_cndmask_b32_e32 v1, v12, v13, vcc
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s6, v6
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; VI-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; VI-NEXT:    v_and_b32_e32 v2, 0x7fff, v4
+; VI-NEXT:    v_and_b32_sdwa v2, v3, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    s_mov_b32 s4, 0x7fff7fff
-; VI-NEXT:    v_or_b32_e32 v1, v2, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    v_bfi_b32 v0, s4, v0, v8
 ; VI-NEXT:    v_bfi_b32 v1, s4, v1, v9
 ; VI-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/X86/combine-fcopysign.ll b/llvm/test/CodeGen/X86/combine-fcopysign.ll
index d7031be3addd9..5d0e7b9f51808 100644
--- a/llvm/test/CodeGen/X86/combine-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/combine-fcopysign.ll
@@ -106,17 +106,17 @@ define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x flo
 define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
@@ -150,17 +150,17 @@ define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %
 define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %z)
@@ -172,18 +172,18 @@ define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x flo
 define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
 ; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; SSE-NEXT:    andps %xmm1, %xmm0
+; SSE-NEXT:    andnps %xmm2, %xmm1
+; SSE-NEXT:    orps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT:    vandps %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
-; AVX-NEXT:    vandps %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-NEXT:    vandnps %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vorps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %y, <4 x float> %z)
   %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1)
