[llvm] 9eff001 - [TargetLowering] Correctly yield NaN from FP_TO_BF16

David Majnemer via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 21 14:18:26 PST 2024


Author: David Majnemer
Date: 2024-02-21T22:17:02Z
New Revision: 9eff001d3dbe84851caa7de4e1093af62c009e06

URL: https://github.com/llvm/llvm-project/commit/9eff001d3dbe84851caa7de4e1093af62c009e06
DIFF: https://github.com/llvm/llvm-project/commit/9eff001d3dbe84851caa7de4e1093af62c009e06.diff

LOG: [TargetLowering] Correctly yield NaN from FP_TO_BF16

We didn't set the exponent field, resulting in tiny numbers instead of
NaNs.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AMDGPU/bf16.ll
    llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
    llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
    llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
    llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
    llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index bde1fff4e1ca74..a4c5167ade3766 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10948,12 +10948,11 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
     Op = expandRoundInexactToOdd(F32, Op, dl, DAG);
     Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
 
-    // Extract the sign bit.
-    SDValue SignBit =
-        DAG.getNode(ISD::AND, dl, I32, Op,
-                    DAG.getConstant(APInt::getSignMask(32), dl, I32));
+    // Extract the sign bit and exponent.
+    SDValue SignBitAndExponentField = DAG.getNode(
+        ISD::AND, dl, I32, Op, DAG.getConstant(0xff800000, dl, I32));
     // Set the quiet bit.
-    SDValue NaN = DAG.getNode(ISD::OR, dl, I32, SignBit,
+    SDValue NaN = DAG.getNode(ISD::OR, dl, I32, SignBitAndExponentField,
                               DAG.getConstant(0x400000, dl, I32));
 
     // Factor in the contribution of the low 16 bits.

diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 67538f26c550bd..63a09e49e00514 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -2182,7 +2182,7 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -2199,7 +2199,7 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -2212,7 +2212,7 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    global_load_dword v0, v[0:1], off
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v4, v0, s4, 0x400000
@@ -2226,7 +2226,7 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_load_b32 v0, v[0:1], off
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX11-NEXT:    v_and_or_b32 v4, v0, s0, 0x400000
@@ -2294,7 +2294,7 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
@@ -2323,7 +2323,7 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX9-NEXT:    v_and_or_b32 v5, v1, s8, v4
 ; GFX9-NEXT:    v_bfe_u32 v4, v4, 16, 1
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v5, s9
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v5
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
 ; GFX9-NEXT:    global_store_short_d16_hi v[2:3], v0, off
@@ -2343,14 +2343,14 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX10-NEXT:    v_cmp_nlg_f64_e64 s4, |v[0:1]|, v[4:5]
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s5
 ; GFX10-NEXT:    s_or_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT:    s_mov_b32 s4, 0x400000
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_nc_u32_e32 v4, v6, v4
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc_lo
 ; GFX10-NEXT:    v_cmp_u_f64_e32 vcc_lo, v[0:1], v[0:1]
 ; GFX10-NEXT:    v_and_or_b32 v5, 0x80000000, v1, v4
 ; GFX10-NEXT:    v_bfe_u32 v4, v4, 16, 1
 ; GFX10-NEXT:    v_add3_u32 v4, v4, v5, 0x7fff
-; GFX10-NEXT:    v_and_or_b32 v5, 0x80000000, v5, s4
+; GFX10-NEXT:    v_and_or_b32 v5, v5, s4, 0x400000
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc_lo
 ; GFX10-NEXT:    global_store_short_d16_hi v[2:3], v0, off
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
@@ -2371,7 +2371,7 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    s_or_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT:    s_mov_b32 s0, 0x400000
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_add_nc_u32_e32 v4, v6, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc_lo
@@ -2380,7 +2380,7 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_bfe_u32 v4, v4, 16, 1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add3_u32 v4, v4, v5, 0x7fff
-; GFX11-NEXT:    v_and_or_b32 v5, 0x80000000, v5, s0
+; GFX11-NEXT:    v_and_or_b32 v5, v5, s0, 0x400000
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc_lo
 ; GFX11-NEXT:    global_store_d16_hi_b16 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
@@ -8999,7 +8999,7 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -9014,7 +9014,7 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -9027,7 +9027,7 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -9042,7 +9042,7 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -9104,7 +9104,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -9112,7 +9112,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -9128,7 +9128,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
@@ -9137,7 +9137,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -9153,7 +9153,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v2, v3, v2
 ; GFX10-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -9176,7 +9176,7 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_add_f32_e32 v2, v3, v2
@@ -9255,7 +9255,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -9268,7 +9268,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -9276,7 +9276,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -9293,7 +9293,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -9302,7 +9302,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX9-NEXT:    v_add_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -9311,7 +9311,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -9331,7 +9331,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_add_f32_e32 v4, v5, v4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
@@ -9426,7 +9426,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -9435,7 +9435,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -9447,7 +9447,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -9455,7 +9455,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -9473,7 +9473,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v4, v5, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
@@ -9482,7 +9482,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -9491,7 +9491,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_add_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -9500,7 +9500,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -9525,7 +9525,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX10-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_add_f32_e32 v5, v7, v6
 ; GFX10-NEXT:    v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_and_or_b32 v6, v4, s4, 0x400000
 ; GFX10-NEXT:    v_bfe_u32 v7, v5, 16, 1
@@ -9560,7 +9560,7 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_add_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_add_f32_e32 v4, v5, v4
@@ -9717,7 +9717,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_add_f32_e32 v3, v3, v7
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -9726,7 +9726,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v3
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v7, v9, vcc
@@ -9738,7 +9738,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX8-NEXT:    v_add_f32_e32 v2, v2, v6
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -9746,7 +9746,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v9, vcc
@@ -9758,7 +9758,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX8-NEXT:    v_add_f32_e32 v1, v1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -9766,7 +9766,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v9, vcc
@@ -9778,7 +9778,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v4
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -9786,7 +9786,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v9, vcc
@@ -9808,7 +9808,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v8, v9, v8
 ; GFX9-NEXT:    v_bfe_u32 v9, v8, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v8, s4
@@ -9817,7 +9817,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -9826,7 +9826,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v2
 ; GFX9-NEXT:    v_add_f32_e32 v7, v9, v7
 ; GFX9-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v7, s4
@@ -9835,7 +9835,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v2, v2, v6
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -9844,7 +9844,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v1
 ; GFX9-NEXT:    v_add_f32_e32 v6, v9, v6
 ; GFX9-NEXT:    v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v6, s4
@@ -9853,7 +9853,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -9862,7 +9862,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
 ; GFX9-NEXT:    v_add_f32_e32 v5, v9, v5
 ; GFX9-NEXT:    v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v5, s4
@@ -9871,7 +9871,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -9896,7 +9896,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v9, 16, v6
 ; GFX10-NEXT:    v_add_f32_e32 v3, v3, v7
 ; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v10, v8, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v7, v8, s4, 0x400000
 ; GFX10-NEXT:    v_add_f32_e32 v9, v11, v9
@@ -9967,7 +9967,7 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v9, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_dual_add_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
 ; GFX11-NEXT:    v_bfe_u32 v10, v8, 16, 1
@@ -10263,7 +10263,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX8-NEXT:    v_add_f32_e32 v7, v7, v15
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v16, v16
@@ -10271,7 +10271,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v7
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v15, v17, vcc
@@ -10283,7 +10283,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX8-NEXT:    v_add_f32_e32 v6, v6, v14
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v15, v15
@@ -10291,7 +10291,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, v14, v6
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v14, v17, vcc
@@ -10303,7 +10303,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX8-NEXT:    v_add_f32_e32 v5, v5, v13
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -10311,7 +10311,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, v13, v5
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v13, v17, vcc
@@ -10323,7 +10323,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX8-NEXT:    v_add_f32_e32 v4, v4, v12
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -10331,7 +10331,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, v12, v4
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v12, v17, vcc
@@ -10343,7 +10343,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX8-NEXT:    v_add_f32_e32 v3, v3, v11
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -10351,7 +10351,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, v11, v3
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v11, v17, vcc
@@ -10363,7 +10363,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX8-NEXT:    v_add_f32_e32 v2, v2, v10
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -10371,7 +10371,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, v10, v2
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v10, v17, vcc
@@ -10383,7 +10383,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX8-NEXT:    v_add_f32_e32 v1, v1, v9
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -10391,7 +10391,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v9, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v9, v17, vcc
@@ -10403,7 +10403,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v8
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -10411,7 +10411,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, v8, v0
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v8, v17, vcc
@@ -10441,7 +10441,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v16, v17, v16
 ; GFX9-NEXT:    v_bfe_u32 v17, v16, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v16, s4
@@ -10450,7 +10450,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v7, v7, v15
 ; GFX9-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -10459,7 +10459,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v6
 ; GFX9-NEXT:    v_add_f32_e32 v15, v17, v15
 ; GFX9-NEXT:    v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v15, s4
@@ -10468,7 +10468,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v6, v6, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v14, v14, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -10477,7 +10477,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v5
 ; GFX9-NEXT:    v_add_f32_e32 v14, v17, v14
 ; GFX9-NEXT:    v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v14, s4
@@ -10486,7 +10486,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v5, v5, v13
 ; GFX9-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v13, v13, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -10495,7 +10495,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
 ; GFX9-NEXT:    v_add_f32_e32 v13, v17, v13
 ; GFX9-NEXT:    v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v13, s4
@@ -10504,7 +10504,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v4, v4, v12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v12, v12, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -10513,7 +10513,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v3
 ; GFX9-NEXT:    v_add_f32_e32 v12, v17, v12
 ; GFX9-NEXT:    v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v12, s4
@@ -10522,7 +10522,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v11, v11, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -10531,7 +10531,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v2
 ; GFX9-NEXT:    v_add_f32_e32 v11, v17, v11
 ; GFX9-NEXT:    v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v11, s4
@@ -10540,7 +10540,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v2, v2, v10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v10, v10, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -10549,7 +10549,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v1
 ; GFX9-NEXT:    v_add_f32_e32 v10, v17, v10
 ; GFX9-NEXT:    v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v10, s4
@@ -10558,7 +10558,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -10567,7 +10567,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v0
 ; GFX9-NEXT:    v_add_f32_e32 v9, v17, v9
 ; GFX9-NEXT:    v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v9, s4
@@ -10576,7 +10576,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v8, v8, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -10599,7 +10599,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
 ; GFX10-NEXT:    v_add_f32_e32 v16, v17, v16
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v14
@@ -10742,7 +10742,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_add_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
 ; GFX11-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
@@ -11434,7 +11434,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX8-NEXT:    v_add_f32_e32 v14, v14, v30
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v31, v31
@@ -11442,7 +11442,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, v30, v14
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX8-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v30, v32, vcc
@@ -11465,14 +11465,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_add_f32_e32 v30, v15, v30
 ; GFX8-NEXT:    v_bfe_u32 v15, v33, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v33
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v33
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v33, v33
 ; GFX8-NEXT:    v_bfe_u32 v33, v30, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v15, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v30
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v30
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -11480,13 +11480,13 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v33, v32, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v32
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX8-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, v29, v13
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v29, v33, vcc
@@ -11498,7 +11498,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v29
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v29
 ; GFX8-NEXT:    v_add_f32_e32 v12, v12, v28
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
@@ -11506,7 +11506,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v29, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, v28, v12
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v28, v33, vcc
@@ -11518,7 +11518,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX8-NEXT:    v_add_f32_e32 v11, v11, v27
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v28, v28
@@ -11526,7 +11526,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, v27, v11
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v27, v33, vcc
@@ -11538,7 +11538,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX8-NEXT:    v_add_f32_e32 v10, v10, v26
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v27, v27
@@ -11546,7 +11546,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, v26, v10
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v26, v33, vcc
@@ -11558,7 +11558,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX8-NEXT:    v_add_f32_e32 v9, v9, v25
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v26, v26
@@ -11566,7 +11566,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, v25, v9
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v25, v33, vcc
@@ -11578,7 +11578,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX8-NEXT:    v_add_f32_e32 v8, v8, v24
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v25, v25
@@ -11586,7 +11586,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, v24, v8
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v24, v33, vcc
@@ -11598,7 +11598,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX8-NEXT:    v_add_f32_e32 v7, v7, v23
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v24, v24
@@ -11606,7 +11606,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, v23, v7
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v23, v33, vcc
@@ -11618,7 +11618,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX8-NEXT:    v_add_f32_e32 v6, v6, v22
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v23, v23
@@ -11626,7 +11626,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, v22, v6
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v22, v33, vcc
@@ -11638,7 +11638,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX8-NEXT:    v_add_f32_e32 v5, v5, v21
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v22, v22
@@ -11646,7 +11646,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, v21, v5
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v21, v33, vcc
@@ -11658,7 +11658,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX8-NEXT:    v_add_f32_e32 v4, v4, v20
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v21, v21
@@ -11666,7 +11666,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, v20, v4
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v20, v33, vcc
@@ -11678,7 +11678,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX8-NEXT:    v_add_f32_e32 v3, v3, v19
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v20, v20
@@ -11686,7 +11686,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, v19, v3
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v19, v33, vcc
@@ -11698,7 +11698,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX8-NEXT:    v_add_f32_e32 v2, v2, v18
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v19, v19
@@ -11706,7 +11706,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, v18, v2
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v18, v33, vcc
@@ -11718,7 +11718,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX8-NEXT:    v_add_f32_e32 v1, v1, v17
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v18, v18
@@ -11726,7 +11726,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, v17, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v17, v33, vcc
@@ -11738,7 +11738,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX8-NEXT:    v_add_f32_e32 v0, v0, v16
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v17, v17
@@ -11746,7 +11746,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v16, v0
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v16, v33, vcc
@@ -11790,7 +11790,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v31, v32, v31
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX9-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v31, s4
@@ -11799,7 +11799,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v14, v14, v30
 ; GFX9-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX9-NEXT:    v_add3_u32 v30, v30, v14, s4
 ; GFX9-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -11812,7 +11812,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    buffer_load_dword v29, off, s[0:3], s32
 ; GFX9-NEXT:    v_add_f32_e32 v30, v32, v30
 ; GFX9-NEXT:    v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v30
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v30
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v30, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -11825,19 +11825,19 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
 ; GFX9-NEXT:    v_add_f32_e32 v29, v15, v29
 ; GFX9-NEXT:    v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v32
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v32, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v15, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v29
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v29
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v29, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
 ; GFX9-NEXT:    v_cndmask_b32_e32 v29, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v13, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -11846,7 +11846,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v12
 ; GFX9-NEXT:    v_add_f32_e32 v32, v33, v32
 ; GFX9-NEXT:    v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX9-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v32, s4
@@ -11855,7 +11855,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v12, v12, v28
 ; GFX9-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX9-NEXT:    v_add3_u32 v28, v28, v12, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -11864,7 +11864,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v11
 ; GFX9-NEXT:    v_add_f32_e32 v28, v33, v28
 ; GFX9-NEXT:    v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v28, s4
@@ -11873,7 +11873,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v11, v11, v27
 ; GFX9-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX9-NEXT:    v_add3_u32 v27, v27, v11, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -11882,7 +11882,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v10
 ; GFX9-NEXT:    v_add_f32_e32 v27, v33, v27
 ; GFX9-NEXT:    v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v27, s4
@@ -11891,7 +11891,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v10, v10, v26
 ; GFX9-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX9-NEXT:    v_add3_u32 v26, v26, v10, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -11900,7 +11900,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v9
 ; GFX9-NEXT:    v_add_f32_e32 v26, v33, v26
 ; GFX9-NEXT:    v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v26, s4
@@ -11909,7 +11909,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v9, v9, v25
 ; GFX9-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX9-NEXT:    v_add3_u32 v25, v25, v9, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -11918,7 +11918,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v8
 ; GFX9-NEXT:    v_add_f32_e32 v25, v33, v25
 ; GFX9-NEXT:    v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v25, s4
@@ -11927,7 +11927,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v8, v8, v24
 ; GFX9-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX9-NEXT:    v_add3_u32 v24, v24, v8, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -11936,7 +11936,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v7
 ; GFX9-NEXT:    v_add_f32_e32 v24, v33, v24
 ; GFX9-NEXT:    v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v24, s4
@@ -11945,7 +11945,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v7, v7, v23
 ; GFX9-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v23, v23, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -11954,7 +11954,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v6
 ; GFX9-NEXT:    v_add_f32_e32 v23, v33, v23
 ; GFX9-NEXT:    v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v23, s4
@@ -11963,7 +11963,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v6, v6, v22
 ; GFX9-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v22, v22, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -11972,7 +11972,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
 ; GFX9-NEXT:    v_add_f32_e32 v22, v33, v22
 ; GFX9-NEXT:    v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v22, s4
@@ -11981,7 +11981,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v5, v5, v21
 ; GFX9-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v21, v21, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -11990,7 +11990,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v4
 ; GFX9-NEXT:    v_add_f32_e32 v21, v33, v21
 ; GFX9-NEXT:    v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v21, s4
@@ -11999,7 +11999,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v4, v4, v20
 ; GFX9-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v20, v20, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -12008,7 +12008,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v3
 ; GFX9-NEXT:    v_add_f32_e32 v20, v33, v20
 ; GFX9-NEXT:    v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v20, s4
@@ -12017,7 +12017,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v19
 ; GFX9-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v19, v19, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -12026,7 +12026,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v2
 ; GFX9-NEXT:    v_add_f32_e32 v19, v33, v19
 ; GFX9-NEXT:    v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v19, s4
@@ -12035,7 +12035,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v2, v2, v18
 ; GFX9-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v18, v18, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -12044,7 +12044,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v1
 ; GFX9-NEXT:    v_add_f32_e32 v18, v33, v18
 ; GFX9-NEXT:    v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v18, s4
@@ -12053,7 +12053,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v17
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -12062,7 +12062,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v0
 ; GFX9-NEXT:    v_add_f32_e32 v17, v33, v17
 ; GFX9-NEXT:    v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v17, s4
@@ -12071,7 +12071,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v16, v16, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -12163,7 +12163,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-NEXT:    v_bfe_u32 v55, v11, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v65, v49, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT:    s_brev_b32 s23, 1
+; GFX10-NEXT:    s_mov_b32 s23, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
@@ -12385,7 +12385,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v53, 16, v24
 ; GFX11-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_add_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
@@ -12678,7 +12678,7 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -12692,7 +12692,7 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, 1.0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -12704,7 +12704,7 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v0, 1.0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -12718,7 +12718,7 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_f32_e32 v0, 1.0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -12761,7 +12761,7 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -12775,7 +12775,7 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, 0x42280000, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -12787,7 +12787,7 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_add_f32_e32 v0, 0x42280000, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -12801,7 +12801,7 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_f32_e32 v0, 0x42280000, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -12849,7 +12849,7 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -12864,7 +12864,7 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -12877,7 +12877,7 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -12892,7 +12892,7 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -12954,7 +12954,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -12962,7 +12962,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -12978,7 +12978,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
@@ -12987,7 +12987,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13003,7 +13003,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_sub_f32_e32 v2, v3, v2
 ; GFX10-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -13026,7 +13026,7 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_sub_f32_e32 v2, v3, v2
@@ -13105,7 +13105,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -13118,7 +13118,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -13126,7 +13126,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -13143,7 +13143,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -13152,7 +13152,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX9-NEXT:    v_sub_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -13161,7 +13161,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13181,7 +13181,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_sub_f32_e32 v4, v5, v4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_sub_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
@@ -13276,7 +13276,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_sub_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -13285,7 +13285,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -13297,7 +13297,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -13305,7 +13305,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -13323,7 +13323,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v4, v5, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
@@ -13332,7 +13332,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -13341,7 +13341,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_sub_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -13350,7 +13350,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13375,7 +13375,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX10-NEXT:    v_sub_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_sub_f32_e32 v5, v7, v6
 ; GFX10-NEXT:    v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_sub_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_and_or_b32 v6, v4, s4, 0x400000
 ; GFX10-NEXT:    v_bfe_u32 v7, v5, 16, 1
@@ -13410,7 +13410,7 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_sub_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_sub_f32_e32 v4, v5, v4
@@ -13481,7 +13481,7 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -13496,7 +13496,7 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13509,7 +13509,7 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -13524,7 +13524,7 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -13586,7 +13586,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -13594,7 +13594,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -13610,7 +13610,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
@@ -13619,7 +13619,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13635,7 +13635,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v2, v3, v2
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -13658,7 +13658,7 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_mul_f32_e32 v2, v3, v2
@@ -13737,7 +13737,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -13750,7 +13750,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -13758,7 +13758,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -13775,7 +13775,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -13784,7 +13784,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -13793,7 +13793,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -13813,7 +13813,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_mul_f32_e32 v4, v5, v4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
@@ -13908,7 +13908,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -13917,7 +13917,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -13929,7 +13929,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -13937,7 +13937,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -13955,7 +13955,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v4, v5, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
@@ -13964,7 +13964,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -13973,7 +13973,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -13982,7 +13982,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -14007,7 +14007,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_mul_f32_e32 v5, v7, v6
 ; GFX10-NEXT:    v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_and_or_b32 v6, v4, s4, 0x400000
 ; GFX10-NEXT:    v_bfe_u32 v7, v5, 16, 1
@@ -14042,7 +14042,7 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_mul_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_mul_f32_e32 v4, v5, v4
@@ -14199,7 +14199,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v3, v7
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -14208,7 +14208,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v3
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v7, v9, vcc
@@ -14220,7 +14220,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX8-NEXT:    v_mul_f32_e32 v2, v2, v6
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -14228,7 +14228,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v9, vcc
@@ -14240,7 +14240,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -14248,7 +14248,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v9, vcc
@@ -14260,7 +14260,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v4
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -14268,7 +14268,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v9, vcc
@@ -14290,7 +14290,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v8, v9, v8
 ; GFX9-NEXT:    v_bfe_u32 v9, v8, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v8, s4
@@ -14299,7 +14299,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v3, v7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -14308,7 +14308,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v2
 ; GFX9-NEXT:    v_mul_f32_e32 v7, v9, v7
 ; GFX9-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v7, s4
@@ -14317,7 +14317,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v2, v2, v6
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -14326,7 +14326,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v1
 ; GFX9-NEXT:    v_mul_f32_e32 v6, v9, v6
 ; GFX9-NEXT:    v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v6, s4
@@ -14335,7 +14335,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -14344,7 +14344,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v5, v9, v5
 ; GFX9-NEXT:    v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v5, s4
@@ -14353,7 +14353,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -14378,7 +14378,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v9, 16, v6
 ; GFX10-NEXT:    v_mul_f32_e32 v3, v3, v7
 ; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v10, v8, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v7, v8, s4, 0x400000
 ; GFX10-NEXT:    v_mul_f32_e32 v9, v11, v9
@@ -14449,7 +14449,7 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v9, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_dual_mul_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
 ; GFX11-NEXT:    v_bfe_u32 v10, v8, 16, 1
@@ -14745,7 +14745,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX8-NEXT:    v_mul_f32_e32 v7, v7, v15
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v16, v16
@@ -14753,7 +14753,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v7
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v15, v17, vcc
@@ -14765,7 +14765,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX8-NEXT:    v_mul_f32_e32 v6, v6, v14
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v15, v15
@@ -14773,7 +14773,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, v14, v6
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v14, v17, vcc
@@ -14785,7 +14785,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX8-NEXT:    v_mul_f32_e32 v5, v5, v13
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -14793,7 +14793,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, v13, v5
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v13, v17, vcc
@@ -14805,7 +14805,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX8-NEXT:    v_mul_f32_e32 v4, v4, v12
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -14813,7 +14813,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, v12, v4
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v12, v17, vcc
@@ -14825,7 +14825,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v3, v11
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -14833,7 +14833,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, v11, v3
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v11, v17, vcc
@@ -14845,7 +14845,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX8-NEXT:    v_mul_f32_e32 v2, v2, v10
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -14853,7 +14853,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, v10, v2
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v10, v17, vcc
@@ -14865,7 +14865,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v9
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -14873,7 +14873,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v9, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v9, v17, vcc
@@ -14885,7 +14885,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v8
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -14893,7 +14893,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, v8, v0
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v8, v17, vcc
@@ -14923,7 +14923,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v16, v17, v16
 ; GFX9-NEXT:    v_bfe_u32 v17, v16, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v16, s4
@@ -14932,7 +14932,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v7, v7, v15
 ; GFX9-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -14941,7 +14941,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v6
 ; GFX9-NEXT:    v_mul_f32_e32 v15, v17, v15
 ; GFX9-NEXT:    v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v15, s4
@@ -14950,7 +14950,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v6, v6, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v14, v14, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -14959,7 +14959,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v5
 ; GFX9-NEXT:    v_mul_f32_e32 v14, v17, v14
 ; GFX9-NEXT:    v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v14, s4
@@ -14968,7 +14968,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v5, v5, v13
 ; GFX9-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v13, v13, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -14977,7 +14977,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
 ; GFX9-NEXT:    v_mul_f32_e32 v13, v17, v13
 ; GFX9-NEXT:    v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v13, s4
@@ -14986,7 +14986,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v4, v4, v12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v12, v12, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -14995,7 +14995,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v3
 ; GFX9-NEXT:    v_mul_f32_e32 v12, v17, v12
 ; GFX9-NEXT:    v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v12, s4
@@ -15004,7 +15004,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v3, v11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v11, v11, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -15013,7 +15013,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v2
 ; GFX9-NEXT:    v_mul_f32_e32 v11, v17, v11
 ; GFX9-NEXT:    v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v11, s4
@@ -15022,7 +15022,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v2, v2, v10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v10, v10, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -15031,7 +15031,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v1
 ; GFX9-NEXT:    v_mul_f32_e32 v10, v17, v10
 ; GFX9-NEXT:    v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v10, s4
@@ -15040,7 +15040,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -15049,7 +15049,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v9, v17, v9
 ; GFX9-NEXT:    v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v9, s4
@@ -15058,7 +15058,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v8, v8, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -15081,7 +15081,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
 ; GFX10-NEXT:    v_mul_f32_e32 v16, v17, v16
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v14
@@ -15224,7 +15224,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_mul_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
 ; GFX11-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
@@ -15916,7 +15916,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX8-NEXT:    v_mul_f32_e32 v14, v14, v30
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v31, v31
@@ -15924,7 +15924,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, v30, v14
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX8-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v30, v32, vcc
@@ -15947,14 +15947,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_mul_f32_e32 v30, v15, v30
 ; GFX8-NEXT:    v_bfe_u32 v15, v33, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v33
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v33
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v33, v33
 ; GFX8-NEXT:    v_bfe_u32 v33, v30, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v15, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v30
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v30
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -15962,13 +15962,13 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v33, v32, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v32
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX8-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, v29, v13
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v29, v33, vcc
@@ -15980,7 +15980,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v29
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v29
 ; GFX8-NEXT:    v_mul_f32_e32 v12, v12, v28
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
@@ -15988,7 +15988,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v29, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, v28, v12
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v28, v33, vcc
@@ -16000,7 +16000,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX8-NEXT:    v_mul_f32_e32 v11, v11, v27
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v28, v28
@@ -16008,7 +16008,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, v27, v11
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v27, v33, vcc
@@ -16020,7 +16020,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX8-NEXT:    v_mul_f32_e32 v10, v10, v26
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v27, v27
@@ -16028,7 +16028,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, v26, v10
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v26, v33, vcc
@@ -16040,7 +16040,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX8-NEXT:    v_mul_f32_e32 v9, v9, v25
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v26, v26
@@ -16048,7 +16048,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, v25, v9
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v25, v33, vcc
@@ -16060,7 +16060,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX8-NEXT:    v_mul_f32_e32 v8, v8, v24
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v25, v25
@@ -16068,7 +16068,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, v24, v8
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v24, v33, vcc
@@ -16080,7 +16080,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX8-NEXT:    v_mul_f32_e32 v7, v7, v23
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v24, v24
@@ -16088,7 +16088,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, v23, v7
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v23, v33, vcc
@@ -16100,7 +16100,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX8-NEXT:    v_mul_f32_e32 v6, v6, v22
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v23, v23
@@ -16108,7 +16108,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, v22, v6
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v22, v33, vcc
@@ -16120,7 +16120,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX8-NEXT:    v_mul_f32_e32 v5, v5, v21
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v22, v22
@@ -16128,7 +16128,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, v21, v5
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v21, v33, vcc
@@ -16140,7 +16140,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX8-NEXT:    v_mul_f32_e32 v4, v4, v20
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v21, v21
@@ -16148,7 +16148,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, v20, v4
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v20, v33, vcc
@@ -16160,7 +16160,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX8-NEXT:    v_mul_f32_e32 v3, v3, v19
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v20, v20
@@ -16168,7 +16168,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, v19, v3
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v19, v33, vcc
@@ -16180,7 +16180,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX8-NEXT:    v_mul_f32_e32 v2, v2, v18
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v19, v19
@@ -16188,7 +16188,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, v18, v2
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v18, v33, vcc
@@ -16200,7 +16200,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v17
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v18, v18
@@ -16208,7 +16208,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, v17, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v17, v33, vcc
@@ -16220,7 +16220,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v16
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v17, v17
@@ -16228,7 +16228,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v16, v0
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v16, v33, vcc
@@ -16272,7 +16272,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v31, v32, v31
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX9-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v31, s4
@@ -16281,7 +16281,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v14, v14, v30
 ; GFX9-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX9-NEXT:    v_add3_u32 v30, v30, v14, s4
 ; GFX9-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -16294,7 +16294,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    buffer_load_dword v29, off, s[0:3], s32
 ; GFX9-NEXT:    v_mul_f32_e32 v30, v32, v30
 ; GFX9-NEXT:    v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v30
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v30
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v30, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -16307,19 +16307,19 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
 ; GFX9-NEXT:    v_mul_f32_e32 v29, v15, v29
 ; GFX9-NEXT:    v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v32
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v32, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v15, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v29
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v29
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v29, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
 ; GFX9-NEXT:    v_cndmask_b32_e32 v29, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v13, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -16328,7 +16328,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v12
 ; GFX9-NEXT:    v_mul_f32_e32 v32, v33, v32
 ; GFX9-NEXT:    v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX9-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v32, s4
@@ -16337,7 +16337,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v12, v12, v28
 ; GFX9-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX9-NEXT:    v_add3_u32 v28, v28, v12, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -16346,7 +16346,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v11
 ; GFX9-NEXT:    v_mul_f32_e32 v28, v33, v28
 ; GFX9-NEXT:    v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v28, s4
@@ -16355,7 +16355,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v11, v11, v27
 ; GFX9-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX9-NEXT:    v_add3_u32 v27, v27, v11, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -16364,7 +16364,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v10
 ; GFX9-NEXT:    v_mul_f32_e32 v27, v33, v27
 ; GFX9-NEXT:    v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v27, s4
@@ -16373,7 +16373,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v10, v10, v26
 ; GFX9-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX9-NEXT:    v_add3_u32 v26, v26, v10, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -16382,7 +16382,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v9
 ; GFX9-NEXT:    v_mul_f32_e32 v26, v33, v26
 ; GFX9-NEXT:    v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v26, s4
@@ -16391,7 +16391,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v9, v9, v25
 ; GFX9-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX9-NEXT:    v_add3_u32 v25, v25, v9, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -16400,7 +16400,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v8
 ; GFX9-NEXT:    v_mul_f32_e32 v25, v33, v25
 ; GFX9-NEXT:    v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v25, s4
@@ -16409,7 +16409,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v8, v8, v24
 ; GFX9-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX9-NEXT:    v_add3_u32 v24, v24, v8, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -16418,7 +16418,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v7
 ; GFX9-NEXT:    v_mul_f32_e32 v24, v33, v24
 ; GFX9-NEXT:    v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v24, s4
@@ -16427,7 +16427,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v7, v7, v23
 ; GFX9-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v23, v23, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -16436,7 +16436,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v6
 ; GFX9-NEXT:    v_mul_f32_e32 v23, v33, v23
 ; GFX9-NEXT:    v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v23, s4
@@ -16445,7 +16445,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v6, v6, v22
 ; GFX9-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v22, v22, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -16454,7 +16454,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
 ; GFX9-NEXT:    v_mul_f32_e32 v22, v33, v22
 ; GFX9-NEXT:    v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v22, s4
@@ -16463,7 +16463,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v5, v5, v21
 ; GFX9-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v21, v21, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -16472,7 +16472,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v4
 ; GFX9-NEXT:    v_mul_f32_e32 v21, v33, v21
 ; GFX9-NEXT:    v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v21, s4
@@ -16481,7 +16481,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v4, v4, v20
 ; GFX9-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v20, v20, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -16490,7 +16490,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v3
 ; GFX9-NEXT:    v_mul_f32_e32 v20, v33, v20
 ; GFX9-NEXT:    v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v20, s4
@@ -16499,7 +16499,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v3, v19
 ; GFX9-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v19, v19, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -16508,7 +16508,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v2
 ; GFX9-NEXT:    v_mul_f32_e32 v19, v33, v19
 ; GFX9-NEXT:    v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v19, s4
@@ -16517,7 +16517,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v2, v2, v18
 ; GFX9-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v18, v18, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -16526,7 +16526,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v1
 ; GFX9-NEXT:    v_mul_f32_e32 v18, v33, v18
 ; GFX9-NEXT:    v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v18, s4
@@ -16535,7 +16535,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v17
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -16544,7 +16544,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v17, v33, v17
 ; GFX9-NEXT:    v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v17, s4
@@ -16553,7 +16553,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v16, v16, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -16645,7 +16645,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-NEXT:    v_bfe_u32 v55, v11, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v65, v49, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT:    s_brev_b32 s23, 1
+; GFX10-NEXT:    s_mov_b32 s23, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
@@ -16867,7 +16867,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v53, 16, v24
 ; GFX11-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_mul_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
@@ -17194,7 +17194,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_div_fixup_f32 v0, v2, v1, v0
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -17220,7 +17220,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
 ; GFX9-NEXT:    v_div_fixup_f32 v0, v2, v1, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -17235,7 +17235,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_div_scale_f32 v2, s4, v1, v1, v0
 ; GFX10-NEXT:    v_div_scale_f32 v5, vcc_lo, v0, v1, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_rcp_f32_e32 v3, v2
 ; GFX10-NEXT:    v_fma_f32 v4, -v2, v3, 1.0
 ; GFX10-NEXT:    v_fmac_f32_e32 v3, v4, v3
@@ -17258,7 +17258,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_div_scale_f32 v2, null, v1, v1, v0
 ; GFX11-NEXT:    v_rcp_f32_e32 v3, v2
@@ -17637,7 +17637,7 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -17652,7 +17652,7 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -17665,7 +17665,7 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -17680,7 +17680,7 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -17750,7 +17750,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -17758,7 +17758,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -17774,7 +17774,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
@@ -17783,7 +17783,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -17799,7 +17799,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_f32_e32 v2, v3, v2
 ; GFX10-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -17822,7 +17822,7 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_min_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_min_f32_e32 v2, v3, v2
@@ -17913,7 +17913,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -17926,7 +17926,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -17934,7 +17934,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -17951,7 +17951,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -17960,7 +17960,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX9-NEXT:    v_min_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -17969,7 +17969,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -17989,7 +17989,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_min_f32_e32 v4, v5, v4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_min_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
@@ -18100,7 +18100,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_min_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -18109,7 +18109,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -18121,7 +18121,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -18129,7 +18129,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -18147,7 +18147,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v4, v5, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
@@ -18156,7 +18156,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -18165,7 +18165,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_min_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -18174,7 +18174,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -18199,7 +18199,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX10-NEXT:    v_min_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_min_f32_e32 v5, v7, v6
 ; GFX10-NEXT:    v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_and_or_b32 v6, v4, s4, 0x400000
 ; GFX10-NEXT:    v_bfe_u32 v7, v5, 16, 1
@@ -18234,7 +18234,7 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_min_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_min_f32_e32 v4, v5, v4
@@ -18423,7 +18423,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_min_f32_e32 v3, v3, v7
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -18432,7 +18432,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v3
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v7, v9, vcc
@@ -18444,7 +18444,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX8-NEXT:    v_min_f32_e32 v2, v2, v6
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -18452,7 +18452,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v9, vcc
@@ -18464,7 +18464,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX8-NEXT:    v_min_f32_e32 v1, v1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -18472,7 +18472,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v9, vcc
@@ -18484,7 +18484,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v4
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -18492,7 +18492,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v9, vcc
@@ -18514,7 +18514,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v8, v9, v8
 ; GFX9-NEXT:    v_bfe_u32 v9, v8, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v8, s4
@@ -18523,7 +18523,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v3, v3, v7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -18532,7 +18532,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v2
 ; GFX9-NEXT:    v_min_f32_e32 v7, v9, v7
 ; GFX9-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v7, s4
@@ -18541,7 +18541,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v2, v2, v6
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -18550,7 +18550,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v1
 ; GFX9-NEXT:    v_min_f32_e32 v6, v9, v6
 ; GFX9-NEXT:    v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v6, s4
@@ -18559,7 +18559,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v1, v1, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -18568,7 +18568,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
 ; GFX9-NEXT:    v_min_f32_e32 v5, v9, v5
 ; GFX9-NEXT:    v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v5, s4
@@ -18577,7 +18577,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -18602,7 +18602,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v9, 16, v6
 ; GFX10-NEXT:    v_min_f32_e32 v3, v3, v7
 ; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v10, v8, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v7, v8, s4, 0x400000
 ; GFX10-NEXT:    v_min_f32_e32 v9, v11, v9
@@ -18673,7 +18673,7 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v9, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_dual_min_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
 ; GFX11-NEXT:    v_bfe_u32 v10, v8, 16, 1
@@ -19033,7 +19033,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX8-NEXT:    v_min_f32_e32 v7, v7, v15
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v16, v16
@@ -19041,7 +19041,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v7
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v15, v17, vcc
@@ -19053,7 +19053,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX8-NEXT:    v_min_f32_e32 v6, v6, v14
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v15, v15
@@ -19061,7 +19061,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, v14, v6
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v14, v17, vcc
@@ -19073,7 +19073,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX8-NEXT:    v_min_f32_e32 v5, v5, v13
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -19081,7 +19081,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, v13, v5
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v13, v17, vcc
@@ -19093,7 +19093,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX8-NEXT:    v_min_f32_e32 v4, v4, v12
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -19101,7 +19101,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, v12, v4
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v12, v17, vcc
@@ -19113,7 +19113,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX8-NEXT:    v_min_f32_e32 v3, v3, v11
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -19121,7 +19121,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, v11, v3
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v11, v17, vcc
@@ -19133,7 +19133,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX8-NEXT:    v_min_f32_e32 v2, v2, v10
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -19141,7 +19141,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, v10, v2
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v10, v17, vcc
@@ -19153,7 +19153,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX8-NEXT:    v_min_f32_e32 v1, v1, v9
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -19161,7 +19161,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v9, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v9, v17, vcc
@@ -19173,7 +19173,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v8
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -19181,7 +19181,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, v8, v0
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v8, v17, vcc
@@ -19211,7 +19211,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v16, v17, v16
 ; GFX9-NEXT:    v_bfe_u32 v17, v16, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v16, s4
@@ -19220,7 +19220,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v7, v7, v15
 ; GFX9-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -19229,7 +19229,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v6
 ; GFX9-NEXT:    v_min_f32_e32 v15, v17, v15
 ; GFX9-NEXT:    v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v15, s4
@@ -19238,7 +19238,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v6, v6, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v14, v14, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -19247,7 +19247,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v5
 ; GFX9-NEXT:    v_min_f32_e32 v14, v17, v14
 ; GFX9-NEXT:    v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v14, s4
@@ -19256,7 +19256,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v5, v5, v13
 ; GFX9-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v13, v13, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -19265,7 +19265,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
 ; GFX9-NEXT:    v_min_f32_e32 v13, v17, v13
 ; GFX9-NEXT:    v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v13, s4
@@ -19274,7 +19274,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v4, v4, v12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v12, v12, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -19283,7 +19283,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v3
 ; GFX9-NEXT:    v_min_f32_e32 v12, v17, v12
 ; GFX9-NEXT:    v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v12, s4
@@ -19292,7 +19292,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v3, v3, v11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v11, v11, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -19301,7 +19301,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v2
 ; GFX9-NEXT:    v_min_f32_e32 v11, v17, v11
 ; GFX9-NEXT:    v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v11, s4
@@ -19310,7 +19310,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v2, v2, v10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v10, v10, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -19319,7 +19319,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v1
 ; GFX9-NEXT:    v_min_f32_e32 v10, v17, v10
 ; GFX9-NEXT:    v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v10, s4
@@ -19328,7 +19328,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v1, v1, v9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -19337,7 +19337,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v0
 ; GFX9-NEXT:    v_min_f32_e32 v9, v17, v9
 ; GFX9-NEXT:    v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v9, s4
@@ -19346,7 +19346,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v8, v8, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -19369,7 +19369,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
 ; GFX10-NEXT:    v_min_f32_e32 v16, v17, v16
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v14
@@ -19512,7 +19512,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_min_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
 ; GFX11-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
@@ -20332,7 +20332,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX8-NEXT:    v_min_f32_e32 v14, v14, v30
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v31, v31
@@ -20340,7 +20340,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, v30, v14
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX8-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v30, v32, vcc
@@ -20363,14 +20363,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_min_f32_e32 v30, v15, v30
 ; GFX8-NEXT:    v_bfe_u32 v15, v33, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v33
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v33
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v33, v33
 ; GFX8-NEXT:    v_bfe_u32 v33, v30, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v15, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v30
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v30
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -20378,13 +20378,13 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v33, v32, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v32
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX8-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, v29, v13
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v29, v33, vcc
@@ -20396,7 +20396,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v29
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v29
 ; GFX8-NEXT:    v_min_f32_e32 v12, v12, v28
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
@@ -20404,7 +20404,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v29, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, v28, v12
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v28, v33, vcc
@@ -20416,7 +20416,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX8-NEXT:    v_min_f32_e32 v11, v11, v27
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v28, v28
@@ -20424,7 +20424,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, v27, v11
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v27, v33, vcc
@@ -20436,7 +20436,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX8-NEXT:    v_min_f32_e32 v10, v10, v26
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v27, v27
@@ -20444,7 +20444,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, v26, v10
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v26, v33, vcc
@@ -20456,7 +20456,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX8-NEXT:    v_min_f32_e32 v9, v9, v25
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v26, v26
@@ -20464,7 +20464,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, v25, v9
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v25, v33, vcc
@@ -20476,7 +20476,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX8-NEXT:    v_min_f32_e32 v8, v8, v24
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v25, v25
@@ -20484,7 +20484,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, v24, v8
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v24, v33, vcc
@@ -20496,7 +20496,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX8-NEXT:    v_min_f32_e32 v7, v7, v23
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v24, v24
@@ -20504,7 +20504,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, v23, v7
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v23, v33, vcc
@@ -20516,7 +20516,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX8-NEXT:    v_min_f32_e32 v6, v6, v22
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v23, v23
@@ -20524,7 +20524,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, v22, v6
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v22, v33, vcc
@@ -20536,7 +20536,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX8-NEXT:    v_min_f32_e32 v5, v5, v21
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v22, v22
@@ -20544,7 +20544,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, v21, v5
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v21, v33, vcc
@@ -20556,7 +20556,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX8-NEXT:    v_min_f32_e32 v4, v4, v20
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v21, v21
@@ -20564,7 +20564,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, v20, v4
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v20, v33, vcc
@@ -20576,7 +20576,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX8-NEXT:    v_min_f32_e32 v3, v3, v19
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v20, v20
@@ -20584,7 +20584,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, v19, v3
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v19, v33, vcc
@@ -20596,7 +20596,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX8-NEXT:    v_min_f32_e32 v2, v2, v18
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v19, v19
@@ -20604,7 +20604,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, v18, v2
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v18, v33, vcc
@@ -20616,7 +20616,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX8-NEXT:    v_min_f32_e32 v1, v1, v17
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v18, v18
@@ -20624,7 +20624,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, v17, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v17, v33, vcc
@@ -20636,7 +20636,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX8-NEXT:    v_min_f32_e32 v0, v0, v16
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v17, v17
@@ -20644,7 +20644,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v16, v0
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v16, v33, vcc
@@ -20688,7 +20688,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v31, v32, v31
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX9-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v31, s4
@@ -20697,7 +20697,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v14, v14, v30
 ; GFX9-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX9-NEXT:    v_add3_u32 v30, v30, v14, s4
 ; GFX9-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -20710,7 +20710,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    buffer_load_dword v29, off, s[0:3], s32
 ; GFX9-NEXT:    v_min_f32_e32 v30, v32, v30
 ; GFX9-NEXT:    v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v30
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v30
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v30, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -20723,19 +20723,19 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
 ; GFX9-NEXT:    v_min_f32_e32 v29, v15, v29
 ; GFX9-NEXT:    v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v32
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v32, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v15, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v29
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v29
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v29, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
 ; GFX9-NEXT:    v_cndmask_b32_e32 v29, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v13, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -20744,7 +20744,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v12
 ; GFX9-NEXT:    v_min_f32_e32 v32, v33, v32
 ; GFX9-NEXT:    v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX9-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v32, s4
@@ -20753,7 +20753,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v12, v12, v28
 ; GFX9-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX9-NEXT:    v_add3_u32 v28, v28, v12, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -20762,7 +20762,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v11
 ; GFX9-NEXT:    v_min_f32_e32 v28, v33, v28
 ; GFX9-NEXT:    v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v28, s4
@@ -20771,7 +20771,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v11, v11, v27
 ; GFX9-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX9-NEXT:    v_add3_u32 v27, v27, v11, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -20780,7 +20780,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v10
 ; GFX9-NEXT:    v_min_f32_e32 v27, v33, v27
 ; GFX9-NEXT:    v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v27, s4
@@ -20789,7 +20789,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v10, v10, v26
 ; GFX9-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX9-NEXT:    v_add3_u32 v26, v26, v10, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -20798,7 +20798,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v9
 ; GFX9-NEXT:    v_min_f32_e32 v26, v33, v26
 ; GFX9-NEXT:    v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v26, s4
@@ -20807,7 +20807,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v9, v9, v25
 ; GFX9-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX9-NEXT:    v_add3_u32 v25, v25, v9, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -20816,7 +20816,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v8
 ; GFX9-NEXT:    v_min_f32_e32 v25, v33, v25
 ; GFX9-NEXT:    v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v25, s4
@@ -20825,7 +20825,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v8, v8, v24
 ; GFX9-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX9-NEXT:    v_add3_u32 v24, v24, v8, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -20834,7 +20834,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v7
 ; GFX9-NEXT:    v_min_f32_e32 v24, v33, v24
 ; GFX9-NEXT:    v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v24, s4
@@ -20843,7 +20843,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v7, v7, v23
 ; GFX9-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v23, v23, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -20852,7 +20852,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v6
 ; GFX9-NEXT:    v_min_f32_e32 v23, v33, v23
 ; GFX9-NEXT:    v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v23, s4
@@ -20861,7 +20861,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v6, v6, v22
 ; GFX9-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v22, v22, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -20870,7 +20870,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
 ; GFX9-NEXT:    v_min_f32_e32 v22, v33, v22
 ; GFX9-NEXT:    v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v22, s4
@@ -20879,7 +20879,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v5, v5, v21
 ; GFX9-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v21, v21, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -20888,7 +20888,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v4
 ; GFX9-NEXT:    v_min_f32_e32 v21, v33, v21
 ; GFX9-NEXT:    v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v21, s4
@@ -20897,7 +20897,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v4, v4, v20
 ; GFX9-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v20, v20, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -20906,7 +20906,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v3
 ; GFX9-NEXT:    v_min_f32_e32 v20, v33, v20
 ; GFX9-NEXT:    v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v20, s4
@@ -20915,7 +20915,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v3, v3, v19
 ; GFX9-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v19, v19, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -20924,7 +20924,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v2
 ; GFX9-NEXT:    v_min_f32_e32 v19, v33, v19
 ; GFX9-NEXT:    v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v19, s4
@@ -20933,7 +20933,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v2, v2, v18
 ; GFX9-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v18, v18, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -20942,7 +20942,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v1
 ; GFX9-NEXT:    v_min_f32_e32 v18, v33, v18
 ; GFX9-NEXT:    v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v18, s4
@@ -20951,7 +20951,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v1, v1, v17
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -20960,7 +20960,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v0
 ; GFX9-NEXT:    v_min_f32_e32 v17, v33, v17
 ; GFX9-NEXT:    v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v17, s4
@@ -20969,7 +20969,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_min_f32_e32 v0, v0, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v16, v16, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -21061,7 +21061,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-NEXT:    v_bfe_u32 v55, v11, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v65, v49, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT:    s_brev_b32 s23, 1
+; GFX10-NEXT:    s_mov_b32 s23, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
@@ -21283,7 +21283,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v53, 16, v24
 ; GFX11-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_min_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
@@ -21594,7 +21594,7 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -21609,7 +21609,7 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -21622,7 +21622,7 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -21637,7 +21637,7 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -21707,7 +21707,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -21715,7 +21715,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -21731,7 +21731,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
@@ -21740,7 +21740,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -21756,7 +21756,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_max_f32_e32 v2, v3, v2
 ; GFX10-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -21779,7 +21779,7 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_max_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_max_f32_e32 v2, v3, v2
@@ -21870,7 +21870,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -21883,7 +21883,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -21891,7 +21891,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -21908,7 +21908,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -21917,7 +21917,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX9-NEXT:    v_max_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -21926,7 +21926,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -21946,7 +21946,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_max_f32_e32 v4, v5, v4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_max_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
@@ -22057,7 +22057,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_max_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -22066,7 +22066,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -22078,7 +22078,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -22086,7 +22086,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -22104,7 +22104,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v4, v5, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
@@ -22113,7 +22113,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -22122,7 +22122,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_max_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -22131,7 +22131,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -22156,7 +22156,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX10-NEXT:    v_max_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_max_f32_e32 v5, v7, v6
 ; GFX10-NEXT:    v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_max_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_and_or_b32 v6, v4, s4, 0x400000
 ; GFX10-NEXT:    v_bfe_u32 v7, v5, 16, 1
@@ -22191,7 +22191,7 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v5, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_max_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_max_f32_e32 v4, v5, v4
@@ -22380,7 +22380,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_max_f32_e32 v3, v3, v7
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -22389,7 +22389,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v3
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v7, v9, vcc
@@ -22401,7 +22401,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX8-NEXT:    v_max_f32_e32 v2, v2, v6
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -22409,7 +22409,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v2
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v6, v9, vcc
@@ -22421,7 +22421,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX8-NEXT:    v_max_f32_e32 v1, v1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -22429,7 +22429,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v9, vcc
@@ -22441,7 +22441,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v4
 ; GFX8-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -22449,7 +22449,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v9, vcc
@@ -22471,7 +22471,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v8, v9, v8
 ; GFX9-NEXT:    v_bfe_u32 v9, v8, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v8, s4
@@ -22480,7 +22480,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v3, v3, v7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v8, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -22489,7 +22489,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v2
 ; GFX9-NEXT:    v_max_f32_e32 v7, v9, v7
 ; GFX9-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v7
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v7, s4
@@ -22498,7 +22498,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v2, v2, v6
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -22507,7 +22507,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v1
 ; GFX9-NEXT:    v_max_f32_e32 v6, v9, v6
 ; GFX9-NEXT:    v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v6, s4
@@ -22516,7 +22516,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v1, v1, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -22525,7 +22525,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
 ; GFX9-NEXT:    v_max_f32_e32 v5, v9, v5
 ; GFX9-NEXT:    v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v5
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v5, s4
@@ -22534,7 +22534,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -22559,7 +22559,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v9, 16, v6
 ; GFX10-NEXT:    v_max_f32_e32 v3, v3, v7
 ; GFX10-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v10, v8, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v7, v8, s4, 0x400000
 ; GFX10-NEXT:    v_max_f32_e32 v9, v11, v9
@@ -22630,7 +22630,7 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v9, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_dual_max_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
 ; GFX11-NEXT:    v_bfe_u32 v10, v8, 16, 1
@@ -22990,7 +22990,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX8-NEXT:    v_max_f32_e32 v7, v7, v15
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v16, v16
@@ -22998,7 +22998,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v7
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v15, v17, vcc
@@ -23010,7 +23010,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX8-NEXT:    v_max_f32_e32 v6, v6, v14
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v15, v15
@@ -23018,7 +23018,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, v14, v6
 ; GFX8-NEXT:    v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v14, v17, vcc
@@ -23030,7 +23030,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX8-NEXT:    v_max_f32_e32 v5, v5, v13
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -23038,7 +23038,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, v13, v5
 ; GFX8-NEXT:    v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v13, v17, vcc
@@ -23050,7 +23050,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX8-NEXT:    v_max_f32_e32 v4, v4, v12
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -23058,7 +23058,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, v12, v4
 ; GFX8-NEXT:    v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v12, v17, vcc
@@ -23070,7 +23070,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX8-NEXT:    v_max_f32_e32 v3, v3, v11
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -23078,7 +23078,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, v11, v3
 ; GFX8-NEXT:    v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v11, v17, vcc
@@ -23090,7 +23090,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX8-NEXT:    v_max_f32_e32 v2, v2, v10
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -23098,7 +23098,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, v10, v2
 ; GFX8-NEXT:    v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v10, v17, vcc
@@ -23110,7 +23110,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX8-NEXT:    v_max_f32_e32 v1, v1, v9
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -23118,7 +23118,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v9, v1
 ; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v9, v17, vcc
@@ -23130,7 +23130,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v8
 ; GFX8-NEXT:    v_or_b32_e32 v18, 0x400000, v18
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -23138,7 +23138,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, v8, v0
 ; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v8, v17, vcc
@@ -23168,7 +23168,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v16, v17, v16
 ; GFX9-NEXT:    v_bfe_u32 v17, v16, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v16
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v16, s4
@@ -23177,7 +23177,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v7, v7, v15
 ; GFX9-NEXT:    v_cndmask_b32_e32 v16, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -23186,7 +23186,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v6
 ; GFX9-NEXT:    v_max_f32_e32 v15, v17, v15
 ; GFX9-NEXT:    v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v15
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v15
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v15, s4
@@ -23195,7 +23195,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v6, v6, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v14, v14, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -23204,7 +23204,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v5
 ; GFX9-NEXT:    v_max_f32_e32 v14, v17, v14
 ; GFX9-NEXT:    v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v14
 ; GFX9-NEXT:    v_and_b32_e32 v13, 0xffff0000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v14, s4
@@ -23213,7 +23213,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v5, v5, v13
 ; GFX9-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v13, v13, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -23222,7 +23222,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v4
 ; GFX9-NEXT:    v_max_f32_e32 v13, v17, v13
 ; GFX9-NEXT:    v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v13
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v13, s4
@@ -23231,7 +23231,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v4, v4, v12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v13, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v12, v12, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -23240,7 +23240,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v3
 ; GFX9-NEXT:    v_max_f32_e32 v12, v17, v12
 ; GFX9-NEXT:    v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v12
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v12, s4
@@ -23249,7 +23249,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v3, v3, v11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v12, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v11, v11, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -23258,7 +23258,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v2
 ; GFX9-NEXT:    v_max_f32_e32 v11, v17, v11
 ; GFX9-NEXT:    v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v11
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v11, s4
@@ -23267,7 +23267,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v2, v2, v10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v11, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v10, v10, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -23276,7 +23276,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v1
 ; GFX9-NEXT:    v_max_f32_e32 v10, v17, v10
 ; GFX9-NEXT:    v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v10
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v10, s4
@@ -23285,7 +23285,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v1, v1, v9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v10, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -23294,7 +23294,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v17, 16, v0
 ; GFX9-NEXT:    v_max_f32_e32 v9, v17, v9
 ; GFX9-NEXT:    v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v18, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v18, 0xff800000, v9
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v9, s4
@@ -23303,7 +23303,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v9, v17, v18, vcc
 ; GFX9-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v17, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v17, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v8, v8, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v17, 0x400000, v17
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -23326,7 +23326,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX10-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX10-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v18, 16, v6
 ; GFX10-NEXT:    v_max_f32_e32 v16, v17, v16
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v17, 16, v14
@@ -23469,7 +23469,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v17, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_dual_max_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
 ; GFX11-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
@@ -24289,7 +24289,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX8-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX8-NEXT:    v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX8-NEXT:    v_max_f32_e32 v14, v14, v30
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v31, v31
@@ -24297,7 +24297,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, v30, v14
 ; GFX8-NEXT:    v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX8-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX8-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
 ; GFX8-NEXT:    v_cndmask_b32_e32 v14, v30, v32, vcc
@@ -24320,14 +24320,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_max_f32_e32 v30, v15, v30
 ; GFX8-NEXT:    v_bfe_u32 v15, v33, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v33
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v33
 ; GFX8-NEXT:    v_add_u32_e32 v15, vcc, s4, v15
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v33, v33
 ; GFX8-NEXT:    v_bfe_u32 v33, v30, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v15, v15, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v30
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v30
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -24335,13 +24335,13 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_bfe_u32 v33, v32, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, v33, v32
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX8-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, v29, v13
 ; GFX8-NEXT:    v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
 ; GFX8-NEXT:    v_cndmask_b32_e32 v13, v29, v33, vcc
@@ -24353,7 +24353,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX8-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v29
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v29
 ; GFX8-NEXT:    v_max_f32_e32 v12, v12, v28
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
@@ -24361,7 +24361,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v29, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, v28, v12
 ; GFX8-NEXT:    v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
 ; GFX8-NEXT:    v_cndmask_b32_e32 v12, v28, v33, vcc
@@ -24373,7 +24373,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX8-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX8-NEXT:    v_max_f32_e32 v11, v11, v27
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v28, v28
@@ -24381,7 +24381,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, v27, v11
 ; GFX8-NEXT:    v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
 ; GFX8-NEXT:    v_cndmask_b32_e32 v11, v27, v33, vcc
@@ -24393,7 +24393,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX8-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX8-NEXT:    v_max_f32_e32 v10, v10, v26
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v27, v27
@@ -24401,7 +24401,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, v26, v10
 ; GFX8-NEXT:    v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
 ; GFX8-NEXT:    v_cndmask_b32_e32 v10, v26, v33, vcc
@@ -24413,7 +24413,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX8-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX8-NEXT:    v_max_f32_e32 v9, v9, v25
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v26, v26
@@ -24421,7 +24421,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, v25, v9
 ; GFX8-NEXT:    v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
 ; GFX8-NEXT:    v_cndmask_b32_e32 v9, v25, v33, vcc
@@ -24433,7 +24433,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX8-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX8-NEXT:    v_max_f32_e32 v8, v8, v24
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v25, v25
@@ -24441,7 +24441,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, v24, v8
 ; GFX8-NEXT:    v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
 ; GFX8-NEXT:    v_cndmask_b32_e32 v8, v24, v33, vcc
@@ -24453,7 +24453,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX8-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX8-NEXT:    v_max_f32_e32 v7, v7, v23
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v24, v24
@@ -24461,7 +24461,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, v23, v7
 ; GFX8-NEXT:    v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX8-NEXT:    v_cndmask_b32_e32 v7, v23, v33, vcc
@@ -24473,7 +24473,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX8-NEXT:    v_max_f32_e32 v6, v6, v22
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v23, v23
@@ -24481,7 +24481,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, v22, v6
 ; GFX8-NEXT:    v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v22, v33, vcc
@@ -24493,7 +24493,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX8-NEXT:    v_max_f32_e32 v5, v5, v21
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v22, v22
@@ -24501,7 +24501,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, v21, v5
 ; GFX8-NEXT:    v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v5, v21, v33, vcc
@@ -24513,7 +24513,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX8-NEXT:    v_max_f32_e32 v4, v4, v20
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v21, v21
@@ -24521,7 +24521,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, v20, v4
 ; GFX8-NEXT:    v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v20, v33, vcc
@@ -24533,7 +24533,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX8-NEXT:    v_max_f32_e32 v3, v3, v19
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v20, v20
@@ -24541,7 +24541,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, v19, v3
 ; GFX8-NEXT:    v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v19, v33, vcc
@@ -24553,7 +24553,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX8-NEXT:    v_max_f32_e32 v2, v2, v18
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v19, v19
@@ -24561,7 +24561,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, v18, v2
 ; GFX8-NEXT:    v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v18, v33, vcc
@@ -24573,7 +24573,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX8-NEXT:    v_max_f32_e32 v1, v1, v17
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v18, v18
@@ -24581,7 +24581,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, v17, v1
 ; GFX8-NEXT:    v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v17, v33, vcc
@@ -24593,7 +24593,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX8-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX8-NEXT:    v_max_f32_e32 v0, v0, v16
 ; GFX8-NEXT:    v_or_b32_e32 v34, 0x400000, v34
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v17, v17
@@ -24601,7 +24601,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX8-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v16, v0
 ; GFX8-NEXT:    v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v16, v33, vcc
@@ -24645,7 +24645,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v31, v32, v31
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v31
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v31
 ; GFX9-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
 ; GFX9-NEXT:    v_and_b32_e32 v14, 0xffff0000, v14
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v31, s4
@@ -24654,7 +24654,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v14, v14, v30
 ; GFX9-NEXT:    v_cndmask_b32_e32 v31, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v32, 0x80000000, v14
+; GFX9-NEXT:    v_and_b32_e32 v32, 0xff800000, v14
 ; GFX9-NEXT:    v_add3_u32 v30, v30, v14, s4
 ; GFX9-NEXT:    v_or_b32_e32 v32, 0x400000, v32
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v14, v14
@@ -24667,7 +24667,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    buffer_load_dword v29, off, s[0:3], s32
 ; GFX9-NEXT:    v_max_f32_e32 v30, v32, v30
 ; GFX9-NEXT:    v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v30
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v30
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v30, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v30, v30
@@ -24680,19 +24680,19 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_and_b32_e32 v29, 0xffff0000, v29
 ; GFX9-NEXT:    v_max_f32_e32 v29, v15, v29
 ; GFX9-NEXT:    v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v32
 ; GFX9-NEXT:    v_add3_u32 v15, v15, v32, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v32, v32
 ; GFX9-NEXT:    v_cndmask_b32_e32 v15, v15, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v29
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v29
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v29, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v29, v29
 ; GFX9-NEXT:    v_cndmask_b32_e32 v29, v32, v33, vcc
 ; GFX9-NEXT:    v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v13
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v13
 ; GFX9-NEXT:    v_add3_u32 v32, v32, v13, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v13, v13
@@ -24701,7 +24701,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v12
 ; GFX9-NEXT:    v_max_f32_e32 v32, v33, v32
 ; GFX9-NEXT:    v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v32
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v32
 ; GFX9-NEXT:    v_and_b32_e32 v28, 0xffff0000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v12, 0xffff0000, v12
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v32, s4
@@ -24710,7 +24710,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v12, v12, v28
 ; GFX9-NEXT:    v_cndmask_b32_e32 v32, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v12
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v12
 ; GFX9-NEXT:    v_add3_u32 v28, v28, v12, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v12, v12
@@ -24719,7 +24719,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v11
 ; GFX9-NEXT:    v_max_f32_e32 v28, v33, v28
 ; GFX9-NEXT:    v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v28
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v28
 ; GFX9-NEXT:    v_and_b32_e32 v27, 0xffff0000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v11, 0xffff0000, v11
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v28, s4
@@ -24728,7 +24728,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v11, v11, v27
 ; GFX9-NEXT:    v_cndmask_b32_e32 v28, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v11
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v11
 ; GFX9-NEXT:    v_add3_u32 v27, v27, v11, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v11, v11
@@ -24737,7 +24737,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v10
 ; GFX9-NEXT:    v_max_f32_e32 v27, v33, v27
 ; GFX9-NEXT:    v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v27
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v27
 ; GFX9-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v27, s4
@@ -24746,7 +24746,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v10, v10, v26
 ; GFX9-NEXT:    v_cndmask_b32_e32 v27, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v10
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v10
 ; GFX9-NEXT:    v_add3_u32 v26, v26, v10, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v10, v10
@@ -24755,7 +24755,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v9
 ; GFX9-NEXT:    v_max_f32_e32 v26, v33, v26
 ; GFX9-NEXT:    v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v26
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v26
 ; GFX9-NEXT:    v_and_b32_e32 v25, 0xffff0000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v9, 0xffff0000, v9
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v26, s4
@@ -24764,7 +24764,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v9, v9, v25
 ; GFX9-NEXT:    v_cndmask_b32_e32 v26, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v9
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v9
 ; GFX9-NEXT:    v_add3_u32 v25, v25, v9, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v9, v9
@@ -24773,7 +24773,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v8
 ; GFX9-NEXT:    v_max_f32_e32 v25, v33, v25
 ; GFX9-NEXT:    v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v25
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v25
 ; GFX9-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v8, 0xffff0000, v8
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v25, s4
@@ -24782,7 +24782,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v8, v8, v24
 ; GFX9-NEXT:    v_cndmask_b32_e32 v25, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v8
 ; GFX9-NEXT:    v_add3_u32 v24, v24, v8, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -24791,7 +24791,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v7
 ; GFX9-NEXT:    v_max_f32_e32 v24, v33, v24
 ; GFX9-NEXT:    v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v24
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v24
 ; GFX9-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v24, s4
@@ -24800,7 +24800,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v7, v7, v23
 ; GFX9-NEXT:    v_cndmask_b32_e32 v24, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v7
 ; GFX9-NEXT:    v_add3_u32 v23, v23, v7, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
@@ -24809,7 +24809,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v6
 ; GFX9-NEXT:    v_max_f32_e32 v23, v33, v23
 ; GFX9-NEXT:    v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v23
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v23
 ; GFX9-NEXT:    v_and_b32_e32 v22, 0xffff0000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v23, s4
@@ -24818,7 +24818,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v6, v6, v22
 ; GFX9-NEXT:    v_cndmask_b32_e32 v23, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v22, v22, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -24827,7 +24827,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v5
 ; GFX9-NEXT:    v_max_f32_e32 v22, v33, v22
 ; GFX9-NEXT:    v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v22
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v22
 ; GFX9-NEXT:    v_and_b32_e32 v21, 0xffff0000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v22, s4
@@ -24836,7 +24836,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v5, v5, v21
 ; GFX9-NEXT:    v_cndmask_b32_e32 v22, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v21, v21, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -24845,7 +24845,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v4
 ; GFX9-NEXT:    v_max_f32_e32 v21, v33, v21
 ; GFX9-NEXT:    v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v21
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v21
 ; GFX9-NEXT:    v_and_b32_e32 v20, 0xffff0000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v21, s4
@@ -24854,7 +24854,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v4, v4, v20
 ; GFX9-NEXT:    v_cndmask_b32_e32 v21, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v20, v20, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -24863,7 +24863,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v3
 ; GFX9-NEXT:    v_max_f32_e32 v20, v33, v20
 ; GFX9-NEXT:    v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v20
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v20
 ; GFX9-NEXT:    v_and_b32_e32 v19, 0xffff0000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v20, s4
@@ -24872,7 +24872,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v3, v3, v19
 ; GFX9-NEXT:    v_cndmask_b32_e32 v20, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v19, v19, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -24881,7 +24881,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v2
 ; GFX9-NEXT:    v_max_f32_e32 v19, v33, v19
 ; GFX9-NEXT:    v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v19
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v19
 ; GFX9-NEXT:    v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v19, s4
@@ -24890,7 +24890,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v2, v2, v18
 ; GFX9-NEXT:    v_cndmask_b32_e32 v19, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v18, v18, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -24899,7 +24899,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v1
 ; GFX9-NEXT:    v_max_f32_e32 v18, v33, v18
 ; GFX9-NEXT:    v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v18
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v18
 ; GFX9-NEXT:    v_and_b32_e32 v17, 0xffff0000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v18, s4
@@ -24908,7 +24908,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v1, v1, v17
 ; GFX9-NEXT:    v_cndmask_b32_e32 v18, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v17, v17, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -24917,7 +24917,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v33, 16, v0
 ; GFX9-NEXT:    v_max_f32_e32 v17, v33, v17
 ; GFX9-NEXT:    v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v34, 0x80000000, v17
+; GFX9-NEXT:    v_and_b32_e32 v34, 0xff800000, v17
 ; GFX9-NEXT:    v_and_b32_e32 v16, 0xffff0000, v16
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v33, v33, v17, s4
@@ -24926,7 +24926,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v16
 ; GFX9-NEXT:    v_cndmask_b32_e32 v17, v33, v34, vcc
 ; GFX9-NEXT:    v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v33, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v33, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v16, v16, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v33, 0x400000, v33
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -25018,7 +25018,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX10-NEXT:    v_bfe_u32 v55, v11, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v65, v49, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT:    s_brev_b32 s23, 1
+; GFX10-NEXT:    s_mov_b32 s23, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v33, 16, v30
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v34, 16, v14
 ; GFX10-NEXT:    v_and_b32_e32 v30, 0xffff0000, v30
@@ -25240,7 +25240,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v53, 16, v24
 ; GFX11-NEXT:    v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_max_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
@@ -25586,7 +25586,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -25617,7 +25617,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -25641,7 +25641,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s4
 ; GFX10-NEXT:    v_cmp_lt_f32_e64 s4, 0, v5
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v2, 0x37800000, v1
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_class_f32_e64 vcc_lo, v0, 0x260
@@ -25676,7 +25676,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
 ; GFX11-NEXT:    v_cmp_lt_f32_e64 s0, 0, v5
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_mul_f32_e32 v2, 0x37800000, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc_lo
@@ -25724,7 +25724,7 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -25738,7 +25738,7 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -25750,7 +25750,7 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -25764,7 +25764,7 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -25816,7 +25816,7 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -25831,7 +25831,7 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
 ; GFX9-NEXT:    v_frexp_mant_f32_e32 v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -25844,7 +25844,7 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_frexp_mant_f32_e32 v0, v1
 ; GFX10-NEXT:    v_frexp_exp_i32_f32_e32 v1, v1
 ; GFX10-NEXT:    v_bfe_u32 v2, v0, 16, 1
@@ -25947,7 +25947,7 @@ define bfloat @v_log_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -25978,7 +25978,7 @@ define bfloat @v_log_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -25990,7 +25990,7 @@ define bfloat @v_log_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
@@ -26015,7 +26015,7 @@ define bfloat @v_log_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
@@ -26097,7 +26097,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26119,7 +26119,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26131,7 +26131,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, 1.0, 0x4f800000, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, 0x42000000, vcc_lo
@@ -26150,7 +26150,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, 1.0, 0x4f800000, vcc_lo
@@ -26257,7 +26257,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26288,7 +26288,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_sub_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26300,7 +26300,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
@@ -26325,7 +26325,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
@@ -26442,7 +26442,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26473,7 +26473,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26485,7 +26485,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v1, 0x3fb8aa3b, v0
 ; GFX10-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v0
 ; GFX10-NEXT:    v_rndne_f32_e32 v2, v1
@@ -26511,7 +26511,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mul_f32_e32 v1, 0x3fb8aa3b, v0
 ; GFX11-NEXT:    v_rndne_f32_e32 v2, v1
@@ -26594,7 +26594,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26616,7 +26616,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26628,7 +26628,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0xc2fc0000, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 0x42800000, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 1.0, 0x1f800000, vcc_lo
@@ -26647,7 +26647,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0xc2fc0000, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 0x42800000, vcc_lo
@@ -26752,7 +26752,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26783,7 +26783,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26795,7 +26795,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v1, 0x40549a78, v0
 ; GFX10-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, 0xc23369f4, v0
 ; GFX10-NEXT:    v_rndne_f32_e32 v2, v1
@@ -26821,7 +26821,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mul_f32_e32 v1, 0x40549a78, v0
 ; GFX11-NEXT:    v_rndne_f32_e32 v2, v1
@@ -26882,7 +26882,7 @@ define bfloat @v_ceil_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26896,7 +26896,7 @@ define bfloat @v_ceil_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_ceil_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26908,7 +26908,7 @@ define bfloat @v_ceil_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_ceil_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -26922,7 +26922,7 @@ define bfloat @v_ceil_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_ceil_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -26967,7 +26967,7 @@ define bfloat @v_trunc_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -26981,7 +26981,7 @@ define bfloat @v_trunc_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_trunc_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -26993,7 +26993,7 @@ define bfloat @v_trunc_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_trunc_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27007,7 +27007,7 @@ define bfloat @v_trunc_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_trunc_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27052,7 +27052,7 @@ define bfloat @v_rint_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27066,7 +27066,7 @@ define bfloat @v_rint_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27078,7 +27078,7 @@ define bfloat @v_rint_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27092,7 +27092,7 @@ define bfloat @v_rint_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27137,7 +27137,7 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27151,7 +27151,7 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27163,7 +27163,7 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27177,7 +27177,7 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27240,7 +27240,7 @@ define bfloat @v_round_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27260,7 +27260,7 @@ define bfloat @v_round_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_add_f32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27276,7 +27276,7 @@ define bfloat @v_round_bf16(bfloat %a) {
 ; GFX10-NEXT:    v_sub_f32_e32 v2, v0, v1
 ; GFX10-NEXT:    v_cmp_ge_f32_e64 s4, |v2|, 0.5
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s4
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfi_b32 v0, 0x7fffffff, v2, v0
 ; GFX10-NEXT:    v_add_f32_e32 v0, v1, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27297,7 +27297,7 @@ define bfloat @v_round_bf16(bfloat %a) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_ge_f32_e64 s0, |v2|, 0.5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, v2, v0
 ; GFX11-NEXT:    v_add_f32_e32 v0, v1, v0
@@ -27343,7 +27343,7 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27357,7 +27357,7 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27369,7 +27369,7 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27383,7 +27383,7 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rndne_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27428,7 +27428,7 @@ define bfloat @v_floor_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27442,7 +27442,7 @@ define bfloat @v_floor_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_floor_f32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27454,7 +27454,7 @@ define bfloat @v_floor_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_floor_f32_e32 v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27468,7 +27468,7 @@ define bfloat @v_floor_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_floor_f32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -27505,7 +27505,7 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -27519,7 +27519,7 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
 ; GFX9-NEXT:    v_max_f32_e32 v0, v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -27531,7 +27531,7 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_max_f32_e32 v0, v0, v0
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
@@ -27545,7 +27545,7 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_max_f32_e32 v0, v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -31058,7 +31058,7 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
 ; GFX8-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT:    v_and_b32_e32 v1, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v1, 0x400000, v1
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31072,7 +31072,7 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31084,7 +31084,7 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -31097,7 +31097,7 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_bfe_i32 v0, v0, 0, 16
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -31143,7 +31143,7 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX8-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31151,7 +31151,7 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -31166,13 +31166,13 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31186,7 +31186,7 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v4, v1, s4, 0x400000
@@ -31205,7 +31205,7 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_bfe_i32 v1, v0, 0, 16
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -31266,14 +31266,14 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v2, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v4, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v4
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v4
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -31281,7 +31281,7 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
@@ -31298,19 +31298,19 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31325,7 +31325,7 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v2, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v5, v0, 16, 1
@@ -31393,7 +31393,7 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_bfe_u32 v4, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
@@ -31401,14 +31401,14 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v5, 16, 1
 ; GFX8-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v5
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
@@ -31416,7 +31416,7 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v4, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
@@ -31434,26 +31434,26 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v5, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31468,7 +31468,7 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v2, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v3, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_bfe_u32 v5, v2, 16, 1
@@ -31501,7 +31501,7 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX11-NEXT:    v_bfe_i32 v2, v1, 0, 16
 ; GFX11-NEXT:    v_bfe_i32 v3, v0, 0, 16
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v3, v3
@@ -31557,7 +31557,7 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT:    v_and_b32_e32 v1, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v1, 0x400000, v1
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31571,7 +31571,7 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -31583,7 +31583,7 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v0, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -31596,7 +31596,7 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v0, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX11-NEXT:    v_and_or_b32 v2, v0, s0, 0x400000
@@ -31636,14 +31636,14 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX8-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31659,13 +31659,13 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31679,7 +31679,7 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v4, v0, s4, 0x400000
@@ -31698,7 +31698,7 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX11-NEXT:    v_bfe_u32 v3, v1, 16, 1
@@ -31749,21 +31749,21 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX8-NEXT:    v_bfe_u32 v4, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v4, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31782,19 +31782,19 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31809,7 +31809,7 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX10-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v5, v1, 16, 1
@@ -31869,14 +31869,14 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_bfe_u32 v5, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v5, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -31884,14 +31884,14 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v5, v0, 16, 1
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v0
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v5, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31910,26 +31910,26 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v4, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -31944,7 +31944,7 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v0, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX10-NEXT:    v_cvt_f32_i32_e32 v3, v3
 ; GFX10-NEXT:    v_bfe_u32 v5, v2, 16, 1
@@ -31976,7 +31976,7 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v0, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v3, v3
 ; GFX11-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX11-NEXT:    v_bfe_u32 v5, v2, 16, 1
@@ -32063,7 +32063,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -32087,7 +32087,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v1, 32, v2
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -32100,7 +32100,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_xor_b32_e32 v2, v0, v1
 ; GFX10-NEXT:    v_ffbh_i32_e32 v3, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v2, 31, v2
 ; GFX10-NEXT:    v_add_nc_u32_e32 v3, -1, v3
 ; GFX10-NEXT:    v_add_nc_u32_e32 v2, 32, v2
@@ -32124,7 +32124,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_xor_b32_e32 v2, v0, v1
 ; GFX11-NEXT:    v_cls_i32_e32 v3, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v2, 31, v2
 ; GFX11-NEXT:    v_add_nc_u32_e32 v3, -1, v3
@@ -32242,7 +32242,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 32, v1
 ; GFX8-NEXT:    v_min_u32_e32 v7, v0, v1
 ; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -32254,7 +32254,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -32287,7 +32287,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX9-NEXT:    v_add_u32_e32 v1, 32, v1
 ; GFX9-NEXT:    v_min_u32_e32 v7, v0, v1
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -32297,7 +32297,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v7
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -32313,7 +32313,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX10-NEXT:    v_xor_b32_e32 v5, v2, v3
 ; GFX10-NEXT:    v_ffbh_i32_e32 v6, v1
 ; GFX10-NEXT:    v_ffbh_i32_e32 v7, v3
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v4, 31, v4
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v5, 31, v5
 ; GFX10-NEXT:    v_add_nc_u32_e32 v6, -1, v6
@@ -32354,7 +32354,7 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX11-NEXT:    v_xor_b32_e32 v5, v2, v3
 ; GFX11-NEXT:    v_cls_i32_e32 v6, v1
 ; GFX11-NEXT:    v_cls_i32_e32 v7, v3
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v4, 31, v4
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v5, 31, v5
 ; GFX11-NEXT:    v_add_nc_u32_e32 v6, -1, v6
@@ -32515,7 +32515,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_min_u32_e32 v7, v7, v8
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v7, v[0:1]
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -32539,7 +32539,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
@@ -32548,7 +32548,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v2
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
@@ -32583,7 +32583,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_ldexp_f32 v4, v4, v5
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -32599,7 +32599,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_add_u32_e32 v1, 32, v1
 ; GFX9-NEXT:    v_min_u32_e32 v8, v0, v1
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v8, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v5
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -32609,7 +32609,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v8
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -32638,7 +32638,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX10-NEXT:    v_add_nc_u32_e32 v11, -1, v11
 ; GFX10-NEXT:    v_add_nc_u32_e32 v9, 32, v9
 ; GFX10-NEXT:    v_min_u32_e32 v8, v10, v8
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_u32_e32 v6, v6, v7
 ; GFX10-NEXT:    v_min_u32_e32 v7, v11, v9
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
@@ -32822,7 +32822,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 32, v5
 ; GFX8-NEXT:    v_min_u32_e32 v11, v4, v5
 ; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v10
 ; GFX8-NEXT:    v_min_u32_e32 v4, 1, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -32844,7 +32844,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v4
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v6, v7, vcc
@@ -32860,7 +32860,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 32, v1
 ; GFX8-NEXT:    v_min_u32_e32 v9, v0, v1
 ; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX8-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX8-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -32872,7 +32872,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -32907,7 +32907,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_add_u32_e32 v5, 32, v5
 ; GFX9-NEXT:    v_min_u32_e32 v11, v4, v5
 ; GFX9-NEXT:    v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v10
 ; GFX9-NEXT:    v_min_u32_e32 v4, 1, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
@@ -32927,7 +32927,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; GFX9-NEXT:    v_ldexp_f32 v4, v4, v6
 ; GFX9-NEXT:    v_bfe_u32 v6, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -32943,7 +32943,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_add_u32_e32 v1, 32, v1
 ; GFX9-NEXT:    v_min_u32_e32 v9, v0, v1
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, v0
@@ -32953,7 +32953,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v9
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -32989,7 +32989,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX10-NEXT:    v_lshlrev_b64 v[4:5], v8, v[4:5]
 ; GFX10-NEXT:    v_add_nc_u32_e32 v9, 32, v9
 ; GFX10-NEXT:    v_add_nc_u32_e32 v13, -1, v13
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_lshlrev_b64 v[6:7], v10, v[6:7]
 ; GFX10-NEXT:    v_min_u32_e32 v9, v12, v9
 ; GFX10-NEXT:    v_min_u32_e32 v11, v13, v14
@@ -33065,7 +33065,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX11-NEXT:    v_lshlrev_b64 v[4:5], v8, v[4:5]
 ; GFX11-NEXT:    v_add_nc_u32_e32 v9, 32, v9
 ; GFX11-NEXT:    v_add_nc_u32_e32 v13, -1, v13
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_lshlrev_b64 v[6:7], v10, v[6:7]
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_min_u32_e32 v9, v12, v9
@@ -33148,7 +33148,7 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33162,7 +33162,7 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
 ; GFX9-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33174,7 +33174,7 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -33187,7 +33187,7 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -33233,7 +33233,7 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_bfe_u32 v2, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -33241,7 +33241,7 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -33256,13 +33256,13 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX9-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33276,7 +33276,7 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v4, v1, s4, 0x400000
@@ -33295,7 +33295,7 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff, v0
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -33357,21 +33357,21 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v2, v4, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v4
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
@@ -33388,19 +33388,19 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33415,7 +33415,7 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v5, v0, 16, 1
@@ -33484,14 +33484,14 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v2
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
@@ -33499,14 +33499,14 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v5
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v5
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
@@ -33524,26 +33524,26 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_u32_sdwa v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v5, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v5
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v5, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v5, v5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33558,7 +33558,7 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; GFX10-NEXT:    v_bfe_u32 v4, v2, 16, 1
@@ -33589,7 +33589,7 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v2, v2
@@ -33652,7 +33652,7 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT:    v_and_b32_e32 v1, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v1, 0x400000, v1
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33666,7 +33666,7 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -33678,7 +33678,7 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v2, v0, s4, 0x400000
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -33691,7 +33691,7 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX11-NEXT:    v_and_or_b32 v2, v0, s0, 0x400000
@@ -33731,14 +33731,14 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX8-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -33754,13 +33754,13 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -33774,7 +33774,7 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v1, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v4, v0, s4, 0x400000
@@ -33793,7 +33793,7 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v1, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX11-NEXT:    v_bfe_u32 v3, v1, 16, 1
@@ -33844,21 +33844,21 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX8-NEXT:    v_bfe_u32 v4, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v4, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v3, vcc
 ; GFX8-NEXT:    v_bfe_u32 v4, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -33877,19 +33877,19 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX9-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v4, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -33904,7 +33904,7 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v1, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v2, v2
 ; GFX10-NEXT:    v_bfe_u32 v3, v0, 16, 1
 ; GFX10-NEXT:    v_bfe_u32 v5, v1, 16, 1
@@ -33964,14 +33964,14 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_bfe_u32 v5, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v5, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -33979,14 +33979,14 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v5, v0, 16, 1
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v0
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v5, v4, vcc
 ; GFX8-NEXT:    v_bfe_u32 v5, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v1
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 0x7fff, v5
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -34005,26 +34005,26 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX9-NEXT:    v_bfe_u32 v4, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v2, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v4, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -34039,7 +34039,7 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v2, v2
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX10-NEXT:    v_cvt_f32_u32_e32 v3, v3
 ; GFX10-NEXT:    v_bfe_u32 v5, v2, 16, 1
@@ -34071,7 +34071,7 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v2, v2
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v3, v3
 ; GFX11-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX11-NEXT:    v_bfe_u32 v5, v2, 16, 1
@@ -34146,7 +34146,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -34166,7 +34166,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v1, 32, v2
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -34178,7 +34178,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_ffbh_u32_e32 v2, v1
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_u32_e32 v2, 32, v2
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
 ; GFX10-NEXT:    v_min_u32_e32 v0, 1, v0
@@ -34198,7 +34198,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v2, v1
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_min_u32_e32 v2, 32, v2
 ; GFX11-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -34286,7 +34286,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX8-NEXT:    v_ffbh_u32_e32 v0, v3
 ; GFX8-NEXT:    v_min_u32_e32 v7, 32, v0
 ; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -34298,7 +34298,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -34323,7 +34323,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX9-NEXT:    v_ffbh_u32_e32 v0, v3
 ; GFX9-NEXT:    v_min_u32_e32 v7, 32, v0
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -34333,7 +34333,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v7
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -34347,7 +34347,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_ffbh_u32_e32 v4, v1
 ; GFX10-NEXT:    v_ffbh_u32_e32 v5, v3
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_u32_e32 v4, 32, v4
 ; GFX10-NEXT:    v_min_u32_e32 v5, 32, v5
 ; GFX10-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
@@ -34380,7 +34380,7 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v4, v1
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v5, v3
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_min_u32_e32 v4, 32, v4
 ; GFX11-NEXT:    v_min_u32_e32 v5, 32, v5
@@ -34503,7 +34503,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v4
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX8-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -34522,7 +34522,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v0
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v2, v2
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
@@ -34531,7 +34531,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v3, v2, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v2
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v2
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v2
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v2, v2
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
@@ -34558,7 +34558,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_ldexp_f32 v4, v4, v5
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -34570,7 +34570,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_ffbh_u32_e32 v0, v3
 ; GFX9-NEXT:    v_min_u32_e32 v8, 32, v0
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v8, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v5
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v5
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -34580,7 +34580,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v8
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -34596,7 +34596,7 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX10-NEXT:    v_ffbh_u32_e32 v6, v1
 ; GFX10-NEXT:    v_ffbh_u32_e32 v8, v3
 ; GFX10-NEXT:    v_ffbh_u32_e32 v7, v5
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_u32_e32 v6, 32, v6
 ; GFX10-NEXT:    v_min_u32_e32 v8, 32, v8
 ; GFX10-NEXT:    v_min_u32_e32 v7, 32, v7
@@ -34741,7 +34741,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_ffbh_u32_e32 v4, v7
 ; GFX8-NEXT:    v_min_u32_e32 v11, 32, v4
 ; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX8-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX8-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX8-NEXT:    v_min_u32_e32 v4, 1, v4
 ; GFX8-NEXT:    v_or_b32_e32 v4, v5, v4
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v4, v4
@@ -34759,7 +34759,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v4
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX8-NEXT:    v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v4
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v4
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
 ; GFX8-NEXT:    v_cndmask_b32_e32 v4, v6, v7, vcc
@@ -34771,7 +34771,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_ffbh_u32_e32 v0, v3
 ; GFX8-NEXT:    v_min_u32_e32 v9, 32, v0
 ; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX8-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX8-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -34783,7 +34783,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
@@ -34810,7 +34810,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_ffbh_u32_e32 v4, v7
 ; GFX9-NEXT:    v_min_u32_e32 v11, 32, v4
 ; GFX9-NEXT:    v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v8
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v8, v8
 ; GFX9-NEXT:    v_ffbh_u32_e32 v8, v1
 ; GFX9-NEXT:    v_min_u32_e32 v4, 1, v4
@@ -34826,7 +34826,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; GFX9-NEXT:    v_ldexp_f32 v4, v4, v6
 ; GFX9-NEXT:    v_bfe_u32 v6, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v6, v6, v4, s4
 ; GFX9-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -34838,7 +34838,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_ffbh_u32_e32 v0, v3
 ; GFX9-NEXT:    v_min_u32_e32 v9, 32, v0
 ; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX9-NEXT:    v_min_u32_e32 v0, 1, v0
 ; GFX9-NEXT:    v_or_b32_e32 v0, v1, v0
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
@@ -34848,7 +34848,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX9-NEXT:    v_sub_u32_e32 v2, 32, v9
 ; GFX9-NEXT:    v_ldexp_f32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -34865,7 +34865,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX10-NEXT:    v_ffbh_u32_e32 v10, v1
 ; GFX10-NEXT:    v_ffbh_u32_e32 v11, v3
 ; GFX10-NEXT:    v_ffbh_u32_e32 v9, v7
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_min_u32_e32 v8, 32, v8
 ; GFX10-NEXT:    v_min_u32_e32 v10, 32, v10
 ; GFX10-NEXT:    v_min_u32_e32 v11, 32, v11
@@ -34925,7 +34925,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v10, v1
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v11, v3
 ; GFX11-NEXT:    v_clz_i32_u32_e32 v9, v7
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    v_min_u32_e32 v8, 32, v8
 ; GFX11-NEXT:    v_min_u32_e32 v10, 32, v10
 ; GFX11-NEXT:    v_min_u32_e32 v11, 32, v11
@@ -40088,7 +40088,7 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -40104,7 +40104,7 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX9-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -40118,7 +40118,7 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v0, v2, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v1, v2, s4, 0x400000
@@ -40134,7 +40134,7 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_fmac_f32_e32 v2, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v0, v2, 16, 1
@@ -40206,7 +40206,7 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -40214,7 +40214,7 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -40234,13 +40234,13 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
@@ -40259,7 +40259,7 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_fmac_f32_e32 v3, v5, v4
 ; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v0, v3, 16, 1
@@ -40284,7 +40284,7 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
 ; GFX11-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
 ; GFX11-NEXT:    v_bfe_u32 v1, v2, 16, 1
@@ -40375,7 +40375,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -40390,7 +40390,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -40398,7 +40398,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -40416,7 +40416,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX9-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -40429,13 +40429,13 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
@@ -40460,7 +40460,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX10-NEXT:    v_fmac_f32_e32 v6, v8, v7
 ; GFX10-NEXT:    v_fmac_f32_e32 v5, v1, v3
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_fmac_f32_e32 v4, v0, v2
 ; GFX10-NEXT:    v_bfe_u32 v1, v6, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v3, v6, s4, 0x400000
@@ -40572,7 +40572,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX8-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX8-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -40581,7 +40581,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -40595,7 +40595,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX8-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -40603,7 +40603,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -40625,13 +40625,13 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_bfe_u32 v7, v6, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX9-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
@@ -40645,13 +40645,13 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX9-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
@@ -40681,7 +40681,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX10-NEXT:    v_bfe_u32 v10, v6, 16, 1
 ; GFX10-NEXT:    v_fmac_f32_e32 v5, v1, v3
 ; GFX10-NEXT:    v_fmac_f32_e32 v7, v9, v8
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_fmac_f32_e32 v4, v0, v2
 ; GFX10-NEXT:    v_add3_u32 v0, v10, v6, 0x7fff
 ; GFX10-NEXT:    v_and_or_b32 v1, v6, s4, 0x400000
@@ -40717,7 +40717,7 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX11-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 16, v5
 ; GFX11-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_fmac_f32_e32 v5, v1, v3
 ; GFX11-NEXT:    v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
@@ -40803,7 +40803,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
@@ -40813,7 +40813,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -40828,7 +40828,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v3, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v3, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v3, 0x400000, v3
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -40837,7 +40837,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -40850,7 +40850,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v3, v0, s4, 0x400000
@@ -40873,7 +40873,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    v_bfe_u32 v1, v0, 16, 1
@@ -40958,7 +40958,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v4, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v3
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
@@ -40971,7 +40971,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -40979,7 +40979,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s4, v1
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc
@@ -40989,7 +40989,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -41005,7 +41005,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -41014,7 +41014,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 16, v2
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v4
 ; GFX9-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v4, v4, v3, s4
@@ -41023,7 +41023,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41032,7 +41032,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41048,7 +41048,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v3, v4, v3
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GFX10-NEXT:    v_bfe_u32 v1, v3, 16, 1
@@ -41087,7 +41087,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_mul_f32_e32 v3, v4, v3
 ; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v1
@@ -41210,7 +41210,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
@@ -41221,7 +41221,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -41231,7 +41231,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
@@ -41243,7 +41243,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -41251,7 +41251,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -41261,7 +41261,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -41278,7 +41278,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -41287,7 +41287,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -41296,7 +41296,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -41305,7 +41305,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v5
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -41314,7 +41314,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41323,7 +41323,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41343,7 +41343,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_mul_f32_e32 v3, v7, v6
 ; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
@@ -41492,7 +41492,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v7, v6, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v6
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX8-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
@@ -41505,7 +41505,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -41513,7 +41513,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v7, vcc
@@ -41523,7 +41523,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
@@ -41533,7 +41533,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
@@ -41545,7 +41545,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -41553,7 +41553,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
@@ -41563,7 +41563,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -41581,7 +41581,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v6, v7, v6
 ; GFX9-NEXT:    v_bfe_u32 v7, v6, 16, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v6, s4
 ; GFX9-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
@@ -41590,7 +41590,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v7, 16, v5
 ; GFX9-NEXT:    v_add_f32_e32 v6, v6, v7
 ; GFX9-NEXT:    v_bfe_u32 v7, v6, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v6
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v6
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v6, s4
@@ -41599,7 +41599,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -41608,7 +41608,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
 ; GFX9-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX9-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -41617,7 +41617,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v3, v5, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX9-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -41626,7 +41626,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
 ; GFX9-NEXT:    v_add_f32_e32 v3, v3, v5
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s4
@@ -41635,7 +41635,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_mul_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v5, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v5, 0x400000, v5
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41644,7 +41644,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
 ; GFX9-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX9-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v4, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -41667,7 +41667,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT:    s_brev_b32 s4, 1
+; GFX10-NEXT:    s_mov_b32 s4, 0xff800000
 ; GFX10-NEXT:    v_bfe_u32 v10, v6, 16, 1
 ; GFX10-NEXT:    v_and_or_b32 v3, v6, s4, 0x400000
 ; GFX10-NEXT:    v_mul_f32_e32 v7, v9, v7
@@ -41736,7 +41736,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v8, 16, v5
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v7, 16, v2

diff  --git a/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll b/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
index cfe1e46bf2c5ee..d35871e3774de7 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
@@ -790,7 +790,7 @@ define bfloat @fmed3_f32_fpext_f16_fptrunc_bf16(half %arg0, half %arg1, half %ar
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -806,7 +806,7 @@ define bfloat @fmed3_f32_fpext_f16_fptrunc_bf16(half %arg0, half %arg1, half %ar
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX9-NEXT:    v_med3_f32 v0, v0, v1, v2
 ; GFX9-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xff800000, v0
 ; GFX9-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX9-NEXT:    v_or_b32_e32 v2, 0x400000, v2
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0

diff  --git a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
index b88aa1ce33fb37..9142858806f1cd 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
@@ -1524,7 +1524,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
 ; GFX900-NEXT:    v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX900-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX900-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX900-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1566,7 +1566,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
 ; GFX908-NEXT:    v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX908-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX908-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX908-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX908-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX908-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX908-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX908-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1608,7 +1608,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
 ; GFX90A-NEXT:    v_lshrrev_b32_sdwa v1, s5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX90A-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX90A-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX90A-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX90A-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX90A-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX90A-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX90A-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1632,7 +1632,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    s_brev_b32 s5, 1
+; GFX10-NEXT:    s_mov_b32 s5, 0xff800000
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_and_b32 s0, s2, -4
 ; GFX10-NEXT:    s_mov_b32 s1, s3
@@ -1673,7 +1673,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
 ; GFX11-LABEL: global_atomic_fadd_ret_bf16_agent:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b64 s[2:3], s[0:1], 0x24
-; GFX11-NEXT:    s_brev_b32 s5, 1
+; GFX11-NEXT:    s_mov_b32 s5, 0xff800000
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_and_b32 s0, s2, -4
@@ -1744,7 +1744,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
 ; GFX900-NEXT:    v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX900-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX900-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX900-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1786,7 +1786,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
 ; GFX908-NEXT:    v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX908-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX908-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX908-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX908-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX908-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX908-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX908-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1828,7 +1828,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
 ; GFX90A-NEXT:    v_lshrrev_b32_sdwa v1, s5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX90A-NEXT:    v_add_f32_e32 v1, 4.0, v1
 ; GFX90A-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX90A-NEXT:    v_and_b32_e32 v4, 0x80000000, v1
+; GFX90A-NEXT:    v_and_b32_e32 v4, 0xff800000, v1
 ; GFX90A-NEXT:    v_add3_u32 v2, v2, v1, s4
 ; GFX90A-NEXT:    v_or_b32_e32 v4, 0x400000, v4
 ; GFX90A-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
@@ -1854,7 +1854,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    s_brev_b32 s5, 1
+; GFX10-NEXT:    s_mov_b32 s5, 0xff800000
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_and_b32 s0, s2, -4
 ; GFX10-NEXT:    s_mov_b32 s1, s3
@@ -1895,7 +1895,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
 ; GFX11-LABEL: global_atomic_fadd_ret_bf16_system:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b64 s[2:3], s[0:1], 0x24
-; GFX11-NEXT:    s_brev_b32 s5, 1
+; GFX11-NEXT:    s_mov_b32 s5, 0xff800000
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_and_b32 s0, s2, -4

diff  --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
index ad788b8d55014c..6a7fb7142c293a 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
@@ -912,7 +912,7 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
   ; DAGISEL-GFX11-WF32-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
   ; DAGISEL-GFX11-WF32-NEXT:   [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
   ; DAGISEL-GFX11-WF32-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
-  ; DAGISEL-GFX11-WF32-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+  ; DAGISEL-GFX11-WF32-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
   ; DAGISEL-GFX11-WF32-NEXT:   [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
   ; DAGISEL-GFX11-WF32-NEXT:   [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
   ; DAGISEL-GFX11-WF32-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
@@ -934,7 +934,7 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
   ; DAGISEL-GFX11-WF64-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
   ; DAGISEL-GFX11-WF64-NEXT:   [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
   ; DAGISEL-GFX11-WF64-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
-  ; DAGISEL-GFX11-WF64-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+  ; DAGISEL-GFX11-WF64-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
   ; DAGISEL-GFX11-WF64-NEXT:   [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
   ; DAGISEL-GFX11-WF64-NEXT:   [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
   ; DAGISEL-GFX11-WF64-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
@@ -956,7 +956,7 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
   ; DAGISEL-GFX10-WF32-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
   ; DAGISEL-GFX10-WF32-NEXT:   [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
   ; DAGISEL-GFX10-WF32-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
-  ; DAGISEL-GFX10-WF32-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+  ; DAGISEL-GFX10-WF32-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
   ; DAGISEL-GFX10-WF32-NEXT:   [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
   ; DAGISEL-GFX10-WF32-NEXT:   [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
   ; DAGISEL-GFX10-WF32-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
@@ -978,7 +978,7 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
   ; DAGISEL-GFX10-WF64-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
   ; DAGISEL-GFX10-WF64-NEXT:   [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
   ; DAGISEL-GFX10-WF64-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
-  ; DAGISEL-GFX10-WF64-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
+  ; DAGISEL-GFX10-WF64-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
   ; DAGISEL-GFX10-WF64-NEXT:   [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
   ; DAGISEL-GFX10-WF64-NEXT:   [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
   ; DAGISEL-GFX10-WF64-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec

diff  --git a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
index e906b5327c3623..48ae98f125bf4a 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
@@ -1413,7 +1413,7 @@ define bfloat @lds_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; VI-NEXT:    v_add_f32_e32 v3, 4.0, v3
 ; VI-NEXT:    v_bfe_u32 v6, v3, 16, 1
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v3
-; VI-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
+; VI-NEXT:    v_and_b32_e32 v7, 0xff800000, v3
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; VI-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; VI-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -1451,7 +1451,7 @@ define bfloat @lds_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX9-NEXT:    v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT:    v_add_f32_e32 v3, 4.0, v3
 ; GFX9-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v3, s6
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
@@ -1560,7 +1560,7 @@ define void @lds_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; VI-NEXT:    v_add_f32_e32 v4, 4.0, v4
 ; VI-NEXT:    v_bfe_u32 v6, v4, 16, 1
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v4
-; VI-NEXT:    v_and_b32_e32 v7, 0x80000000, v4
+; VI-NEXT:    v_and_b32_e32 v7, 0xff800000, v4
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, 0x7fff, v6
 ; VI-NEXT:    v_or_b32_e32 v7, 0x400000, v7
 ; VI-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4
@@ -1597,7 +1597,7 @@ define void @lds_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
 ; GFX9-NEXT:    v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT:    v_add_f32_e32 v4, 4.0, v4
 ; GFX9-NEXT:    v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v4, s6
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v4, v4

diff  --git a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
index 590b40960faab9..672c93b6adf7f4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -4262,20 +4262,20 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
 ; GFX9-NEXT:    v_fma_f32 v8, v12, v9, v11
 ; GFX9-NEXT:    v_fma_f32 v2, v12, v5, v2
 ; GFX9-NEXT:    v_bfe_u32 v5, v7, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v9, 0x80000000, v7
+; GFX9-NEXT:    v_and_b32_e32 v9, 0xff800000, v7
 ; GFX9-NEXT:    v_bfe_u32 v11, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v12, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v12, 0xff800000, v1
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v7, s2
 ; GFX9-NEXT:    v_or_b32_e32 v9, 0x400000, v9
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v7, v7
 ; GFX9-NEXT:    v_bfe_u32 v13, v8, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v14, 0x80000000, v8
+; GFX9-NEXT:    v_and_b32_e32 v14, 0xff800000, v8
 ; GFX9-NEXT:    v_add3_u32 v11, v11, v1, s2
 ; GFX9-NEXT:    v_or_b32_e32 v12, 0x400000, v12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_bfe_u32 v15, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v16, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v16, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v13, v13, v8, s2
 ; GFX9-NEXT:    v_or_b32_e32 v14, 0x400000, v14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v11, v12, vcc
@@ -4298,20 +4298,20 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
 ; GFX9-NEXT:    v_fma_f32 v2, v4, v10, v2
 ; GFX9-NEXT:    v_fma_f32 v4, v4, v6, v7
 ; GFX9-NEXT:    v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v6, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v6, 0xff800000, v1
 ; GFX9-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v8, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v8, 0xff800000, v3
 ; GFX9-NEXT:    v_add3_u32 v5, v5, v1, s2
 ; GFX9-NEXT:    v_or_b32_e32 v6, 0x400000, v6
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX9-NEXT:    v_bfe_u32 v9, v2, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v10, 0x80000000, v2
+; GFX9-NEXT:    v_and_b32_e32 v10, 0xff800000, v2
 ; GFX9-NEXT:    v_add3_u32 v7, v7, v3, s2
 ; GFX9-NEXT:    v_or_b32_e32 v8, 0x400000, v8
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v6, vcc
 ; GFX9-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX9-NEXT:    v_bfe_u32 v11, v4, 16, 1
-; GFX9-NEXT:    v_and_b32_e32 v12, 0x80000000, v4
+; GFX9-NEXT:    v_and_b32_e32 v12, 0xff800000, v4
 ; GFX9-NEXT:    v_add3_u32 v9, v9, v2, s2
 ; GFX9-NEXT:    v_or_b32_e32 v10, 0x400000, v10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v7, v8, vcc
@@ -4332,7 +4332,7 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x10
 ; GFX10-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
-; GFX10-NEXT:    s_brev_b32 s2, 1
+; GFX10-NEXT:    s_mov_b32 s2, 0xff800000
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    global_load_dwordx2 v[0:1], v6, s[0:1]
@@ -4416,7 +4416,7 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
 ; GFX11-NEXT:    s_load_b64 s[2:3], s[0:1], 0x10
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
-; GFX11-NEXT:    s_brev_b32 s0, 1
+; GFX11-NEXT:    s_mov_b32 s0, 0xff800000
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x2
 ; GFX11-NEXT:    global_load_b64 v[0:1], v6, s[2:3]


        


More information about the llvm-commits mailing list