[llvm] 9f4746b - AMDGPU: Combine down fcopysign f64 magnitude

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 6 01:54:31 PST 2023


Author: Matt Arsenault
Date: 2023-03-06T05:54:25-04:00
New Revision: 9f4746b65f9fdea39975abeea37fd1f96475d369

URL: https://github.com/llvm/llvm-project/commit/9f4746b65f9fdea39975abeea37fd1f96475d369
DIFF: https://github.com/llvm/llvm-project/commit/9f4746b65f9fdea39975abeea37fd1f96475d369.diff

LOG: AMDGPU: Combine down fcopysign f64 magnitude

Copy through the low bits and only apply an f32
copysign to the high half. This is effectively
what we do for codegen anyway, but this provides
some combine benefits. The cases involving constants
show some small improvements.

https://reviews.llvm.org/D142682

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
    llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
    llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d23ef959d93c..f30cd8d55015 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9468,13 +9468,34 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
 
 SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
+  SDValue MagnitudeOp = N->getOperand(0);
   SDValue SignOp = N->getOperand(1);
-  if (SignOp.getValueType() != MVT::f64)
-    return SDValue();
-
   SelectionDAG &DAG = DCI.DAG;
   SDLoc DL(N);
 
+  // f64 fcopysign is really an f32 copysign on the high bits, so replace the
+  // lower half with a copy.
+  // fcopysign f64:x, _:y -> x.lo32, (fcopysign (f32 x.hi32), _:y)
+  if (MagnitudeOp.getValueType() == MVT::f64) {
+    SDValue MagAsVector = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, MagnitudeOp);
+    SDValue MagLo =
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, MagAsVector,
+                  DAG.getConstant(0, DL, MVT::i32));
+    SDValue MagHi =
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, MagAsVector,
+                  DAG.getConstant(1, DL, MVT::i32));
+
+    SDValue HiOp =
+      DAG.getNode(ISD::FCOPYSIGN, DL, MVT::f32, MagHi, SignOp);
+
+    SDValue Vector = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32, MagLo, HiOp);
+
+    return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Vector);
+  }
+
+  if (SignOp.getValueType() != MVT::f64)
+    return SDValue();
+
   // Reduce width of sign operand, we only need the highest bit.
   //
   // fcopysign f64:x, f64:y ->

diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index 396ae55313ed..1103c18b72b8 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -872,22 +872,24 @@ define amdgpu_kernel void @v_copysign_out_f64_mag_f64_sign_f16(ptr addrspace(1)
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s14, 0
 ; SI-NEXT:    s_mov_b32 s15, s11
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT:    v_mov_b32_e32 v3, v1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_load_ushort v2, v[2:3], s[0:3], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[12:13], s[6:7]
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[12:15], 0 addr64
-; SI-NEXT:    s_mov_b64 s[2:3], s[14:15]
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
-; SI-NEXT:    buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[0:1], s[12:15], 0 addr64
 ; SI-NEXT:    s_brev_b32 s0, -2
 ; SI-NEXT:    s_mov_b32 s10, -1
 ; SI-NEXT:    s_mov_b32 s8, s4
 ; SI-NEXT:    s_mov_b32 s9, s5
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT:    v_bfi_b32 v4, s0, v4, v0
-; SI-NEXT:    buffer_store_dwordx2 v[3:4], off, s[8:11], 0
+; SI-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_copysign_out_f64_mag_f64_sign_f16:

diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
index 97dd018b677c..62ffd8818e41 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f64.ll
@@ -279,28 +279,26 @@ define amdgpu_kernel void @s_test_copysign_f64_0_mag(ptr addrspace(1) %out, doub
 ; SI-LABEL: s_test_copysign_f64_0_mag:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_brev_b32 s2, -2
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_bfi_b32 v1, s2, 0, v0
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_test_copysign_f64_0_mag:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v2, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_brev_b32 s2, -2
-; VI-NEXT:    v_mov_b32_e32 v2, s3
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_bfi_b32 v3, s2, 0, v2
-; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double 0.0, double %sign)
@@ -312,30 +310,28 @@ define amdgpu_kernel void @s_test_copysign_f64_1_mag(ptr addrspace(1) %out, doub
 ; SI-LABEL: s_test_copysign_f64_1_mag:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_brev_b32 s2, -2
-; SI-NEXT:    v_mov_b32_e32 v0, 0x3ff00000
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; SI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_test_copysign_f64_1_mag:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v2, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_brev_b32 s2, -2
-; VI-NEXT:    v_mov_b32_e32 v2, 0x3ff00000
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; VI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
-; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double 1.0, double %sign)
@@ -347,30 +343,28 @@ define amdgpu_kernel void @s_test_copysign_f64_10_mag(ptr addrspace(1) %out, dou
 ; SI-LABEL: s_test_copysign_f64_10_mag:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_brev_b32 s2, -2
-; SI-NEXT:    v_mov_b32_e32 v0, 0x40240000
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; SI-NEXT:    s_or_b32 s0, s0, 0x40240000
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_test_copysign_f64_10_mag:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v2, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_brev_b32 s2, -2
-; VI-NEXT:    v_mov_b32_e32 v2, 0x40240000
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; VI-NEXT:    s_or_b32 s0, s0, 0x40240000
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
-; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double 10.0, double %sign)
@@ -382,30 +376,28 @@ define amdgpu_kernel void @s_test_copysign_f64_neg1_mag(ptr addrspace(1) %out, d
 ; SI-LABEL: s_test_copysign_f64_neg1_mag:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_brev_b32 s2, -2
-; SI-NEXT:    v_mov_b32_e32 v0, 0xbff00000
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; SI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_test_copysign_f64_neg1_mag:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v2, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_brev_b32 s2, -2
-; VI-NEXT:    v_mov_b32_e32 v2, 0xbff00000
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; VI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
-; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double -1.0, double %sign)
@@ -417,30 +409,28 @@ define amdgpu_kernel void @s_test_copysign_f64_neg10_mag(ptr addrspace(1) %out,
 ; SI-LABEL: s_test_copysign_f64_neg10_mag:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_brev_b32 s2, -2
-; SI-NEXT:    v_mov_b32_e32 v0, 0xc0240000
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; SI-NEXT:    s_or_b32 s0, s0, 0x40240000
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: s_test_copysign_f64_neg10_mag:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v2, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_brev_b32 s2, -2
-; VI-NEXT:    v_mov_b32_e32 v2, 0xc0240000
-; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; VI-NEXT:    s_or_b32 s0, s0, 0x40240000
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_bfi_b32 v3, s2, v2, v3
-; VI-NEXT:    v_mov_b32_e32 v2, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s0
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
   %result = call double @llvm.copysign.f64(double -10.0, double %sign)
@@ -462,9 +452,9 @@ define amdgpu_kernel void @s_test_copysign_v2f64(ptr addrspace(1) %out, <2 x dou
 ; SI-NEXT:    v_bfi_b32 v3, s8, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, s5
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    v_bfi_b32 v1, s8, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -476,13 +466,13 @@ define amdgpu_kernel void @s_test_copysign_v2f64(ptr addrspace(1) %out, <2 x dou
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_mov_b32_e32 v2, s5
 ; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v0, s9
 ; VI-NEXT:    v_mov_b32_e32 v5, s1
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
+; VI-NEXT:    v_bfi_b32 v1, s2, v2, v0
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
@@ -502,16 +492,16 @@ define amdgpu_kernel void @s_test_copysign_v3f64(ptr addrspace(1) %out, <3 x dou
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
 ; SI-NEXT:    v_mov_b32_e32 v1, s15
-; SI-NEXT:    v_mov_b32_e32 v4, s9
-; SI-NEXT:    v_mov_b32_e32 v5, s17
 ; SI-NEXT:    v_bfi_b32 v3, s10, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, s5
 ; SI-NEXT:    v_mov_b32_e32 v1, s13
-; SI-NEXT:    v_bfi_b32 v5, s10, v4, v5
-; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    v_bfi_b32 v1, s10, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s17
+; SI-NEXT:    v_bfi_b32 v5, s10, v0, v2
+; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -524,13 +514,13 @@ define amdgpu_kernel void @s_test_copysign_v3f64(ptr addrspace(1) %out, <3 x dou
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    v_mov_b32_e32 v1, s15
+; VI-NEXT:    v_mov_b32_e32 v2, s5
 ; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v1, s13
-; VI-NEXT:    v_mov_b32_e32 v4, s9
-; VI-NEXT:    v_mov_b32_e32 v5, s17
-; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; VI-NEXT:    v_bfi_b32 v5, s2, v4, v5
+; VI-NEXT:    v_mov_b32_e32 v0, s13
+; VI-NEXT:    v_bfi_b32 v1, s2, v2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s17
+; VI-NEXT:    v_bfi_b32 v5, s2, v0, v2
 ; VI-NEXT:    s_add_u32 s2, s0, 16
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v7, s3
@@ -538,8 +528,8 @@ define amdgpu_kernel void @s_test_copysign_v3f64(ptr addrspace(1) %out, <3 x dou
 ; VI-NEXT:    v_mov_b32_e32 v6, s2
 ; VI-NEXT:    flat_store_dwordx2 v[6:7], v[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v5, s1
-; VI-NEXT:    v_mov_b32_e32 v2, s6
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
@@ -552,27 +542,28 @@ define amdgpu_kernel void @s_test_copysign_v4f64(ptr addrspace(1) %out, <4 x dou
 ; SI-LABEL: s_test_copysign_v4f64:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x11
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_brev_b32 s12, -2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    v_mov_b32_e32 v4, s11
-; SI-NEXT:    v_mov_b32_e32 v5, s19
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
 ; SI-NEXT:    v_mov_b32_e32 v1, s15
-; SI-NEXT:    v_bfi_b32 v7, s12, v4, v5
-; SI-NEXT:    v_mov_b32_e32 v4, s9
-; SI-NEXT:    v_mov_b32_e32 v5, s17
 ; SI-NEXT:    v_bfi_b32 v3, s12, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, s5
 ; SI-NEXT:    v_mov_b32_e32 v1, s13
-; SI-NEXT:    v_mov_b32_e32 v6, s10
-; SI-NEXT:    v_bfi_b32 v5, s12, v4, v5
-; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    v_bfi_b32 v1, s12, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_mov_b32_e32 v2, s19
+; SI-NEXT:    v_bfi_b32 v7, s12, v0, v2
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s17
+; SI-NEXT:    v_bfi_b32 v5, s12, v0, v2
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_mov_b32_e32 v6, s10
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -585,26 +576,26 @@ define amdgpu_kernel void @s_test_copysign_v4f64(ptr addrspace(1) %out, <4 x dou
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    v_mov_b32_e32 v1, s15
-; VI-NEXT:    v_mov_b32_e32 v4, s11
-; VI-NEXT:    v_mov_b32_e32 v5, s19
+; VI-NEXT:    v_mov_b32_e32 v2, s5
 ; VI-NEXT:    v_bfi_b32 v3, s2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    v_mov_b32_e32 v1, s13
-; VI-NEXT:    v_bfi_b32 v7, s2, v4, v5
-; VI-NEXT:    v_mov_b32_e32 v4, s9
-; VI-NEXT:    v_mov_b32_e32 v5, s17
-; VI-NEXT:    v_bfi_b32 v1, s2, v0, v1
-; VI-NEXT:    v_bfi_b32 v5, s2, v4, v5
+; VI-NEXT:    v_mov_b32_e32 v0, s13
+; VI-NEXT:    v_bfi_b32 v1, s2, v2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_mov_b32_e32 v2, s19
+; VI-NEXT:    v_bfi_b32 v7, s2, v0, v2
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s17
+; VI-NEXT:    v_bfi_b32 v5, s2, v0, v2
 ; VI-NEXT:    s_add_u32 s2, s0, 16
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v9, s3
-; VI-NEXT:    v_mov_b32_e32 v6, s10
 ; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v6, s10
 ; VI-NEXT:    v_mov_b32_e32 v8, s2
 ; VI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    v_mov_b32_e32 v5, s1
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s6
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index 1aed1de7cf08..6b013175ff61 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -7,33 +7,32 @@ define amdgpu_kernel void @round_f64(ptr addrspace(1) %out, double %x) #0 {
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    s_mov_b32 s9, 0xfffff
-; SI-NEXT:    s_mov_b32 s8, s6
-; SI-NEXT:    v_mov_b32_e32 v2, 0x3ff00000
+; SI-NEXT:    s_mov_b32 s5, 0xfffff
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_bfe_u32 s0, s3, 0xb0014
-; SI-NEXT:    s_addk_i32 s0, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[8:9], s[8:9], s0
-; SI-NEXT:    s_andn2_b64 s[8:9], s[2:3], s[8:9]
-; SI-NEXT:    s_and_b32 s5, s3, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s0, 0
-; SI-NEXT:    s_cselect_b32 s8, 0, s8
-; SI-NEXT:    s_cselect_b32 s5, s5, s9
-; SI-NEXT:    s_cmp_gt_i32 s0, 51
-; SI-NEXT:    s_cselect_b32 s8, s2, s8
+; SI-NEXT:    s_bfe_u32 s8, s3, 0xb0014
+; SI-NEXT:    s_addk_i32 s8, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[4:5], s8
+; SI-NEXT:    s_andn2_b64 s[4:5], s[2:3], s[4:5]
+; SI-NEXT:    s_and_b32 s10, s3, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
+; SI-NEXT:    s_cselect_b32 s4, 0, s4
+; SI-NEXT:    s_cselect_b32 s5, s10, s5
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
+; SI-NEXT:    s_cselect_b32 s8, s2, s4
 ; SI-NEXT:    s_cselect_b32 s9, s3, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    v_add_f64 v[0:1], s[2:3], -v[0:1]
-; SI-NEXT:    s_brev_b32 s0, -2
-; SI-NEXT:    v_mov_b32_e32 v3, s3
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    v_bfi_b32 v2, s0, v2, v3
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[0:1]|, 0.5
+; SI-NEXT:    s_or_b32 s0, s10, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s0, s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    v_add_f64 v[0:1], s[8:9], v[0:1]
-; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -41,20 +40,19 @@ define amdgpu_kernel void @round_f64(ptr addrspace(1) %out, double %x) #0 {
 ; CI-LABEL: round_f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_brev_b32 s5, -2
-; CI-NEXT:    v_mov_b32_e32 v4, 0x3ff00000
+; CI-NEXT:    s_mov_b32 s8, 0
 ; CI-NEXT:    s_mov_b32 s7, 0xf000
 ; CI-NEXT:    s_mov_b32 s6, -1
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[2:3]
-; CI-NEXT:    v_mov_b32_e32 v5, s3
-; CI-NEXT:    v_add_f64 v[2:3], s[2:3], -v[0:1]
-; CI-NEXT:    v_bfi_b32 v4, s5, v4, v5
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
-; CI-NEXT:    v_mov_b32_e32 v2, 0
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
-; CI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
 ; CI-NEXT:    s_mov_b32 s4, s0
+; CI-NEXT:    v_add_f64 v[2:3], s[2:3], -v[0:1]
+; CI-NEXT:    s_and_b32 s0, s3, 0x80000000
+; CI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[2:3]|, 0.5
+; CI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
+; CI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; CI-NEXT:    s_cselect_b32 s9, s0, 0
+; CI-NEXT:    v_add_f64 v[0:1], v[0:1], s[8:9]
 ; CI-NEXT:    s_mov_b32 s5, s1
 ; CI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; CI-NEXT:    s_endpgm
@@ -77,8 +75,6 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_movk_i32 s4, 0xfc01
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s3, 0xfffff
-; SI-NEXT:    s_brev_b32 s5, -2
-; SI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_bfe_u32 v4, v3, 20, 11
 ; SI-NEXT:    v_add_i32_e32 v6, vcc, s4, v4
@@ -94,13 +90,13 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 51, v6
 ; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v3, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v2, vcc
-; SI-NEXT:    v_add_f64 v[6:7], v[2:3], -v[4:5]
-; SI-NEXT:    v_bfi_b32 v2, s5, v8, v3
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
-; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v2, vcc
-; SI-NEXT:    v_mov_b32_e32 v2, 0
+; SI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[4:5]
+; SI-NEXT:    v_or_b32_e32 v6, 0x3ff00000, v7
+; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v2, v1
+; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
 ; SI-NEXT:    v_add_f64 v[2:3], v[4:5], v[2:3]
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
@@ -114,16 +110,15 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; CI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; CI-NEXT:    s_brev_b32 s2, -2
-; CI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
+; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], v[2:3]
-; CI-NEXT:    v_add_f64 v[6:7], v[2:3], -v[4:5]
-; CI-NEXT:    v_bfi_b32 v2, s2, v8, v3
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
-; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v2, vcc
-; CI-NEXT:    v_mov_b32_e32 v2, 0
+; CI-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
+; CI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[4:5]
+; CI-NEXT:    v_or_b32_e32 v6, 0x3ff00000, v6
+; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
+; CI-NEXT:    v_mov_b32_e32 v2, v1
+; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
 ; CI-NEXT:    v_add_f64 v[2:3], v[4:5], v[2:3]
 ; CI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
 ; CI-NEXT:    s_endpgm
@@ -140,16 +135,16 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; SI-LABEL: round_v2f64:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s9, 0xfffff
 ; SI-NEXT:    s_mov_b32 s8, s2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_bfe_u32 s3, s7, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
 ; SI-NEXT:    s_lshr_b64 s[10:11], s[8:9], s3
-; SI-NEXT:    s_andn2_b64 s[10:11], s[6:7], s[10:11]
 ; SI-NEXT:    s_and_b32 s12, s7, 0x80000000
+; SI-NEXT:    s_andn2_b64 s[10:11], s[6:7], s[10:11]
 ; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s10, 0, s10
 ; SI-NEXT:    s_cselect_b32 s11, s12, s11
@@ -159,33 +154,32 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_brev_b32 s3, -2
-; SI-NEXT:    v_mov_b32_e32 v4, 0x3ff00000
-; SI-NEXT:    v_mov_b32_e32 v2, s7
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    v_bfi_b32 v2, s3, v4, v2
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
+; SI-NEXT:    s_or_b32 s3, s12, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
 ; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    s_bfe_u32 s6, s5, 0xb0014
-; SI-NEXT:    v_add_f64 v[2:3], s[10:11], v[0:1]
-; SI-NEXT:    s_add_i32 s10, s6, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[8:9], s10
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[8:9], s3
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
 ; SI-NEXT:    s_and_b32 s8, s5, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s10, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
 ; SI-NEXT:    s_cselect_b32 s7, s8, s7
-; SI-NEXT:    s_cmp_gt_i32 s10, 51
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s6, s4, s6
 ; SI-NEXT:    s_cselect_b32 s7, s5, s7
-; SI-NEXT:    v_mov_b32_e32 v0, s6
-; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v5, s5
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    v_bfi_b32 v4, s3, v4, v5
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_add_f64 v[4:5], s[4:5], -v[2:3]
+; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[4:5]|, 0.5
+; SI-NEXT:    v_add_f64 v[2:3], s[10:11], v[0:1]
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[0:1]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -195,26 +189,25 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; CI-NEXT:    s_brev_b32 s2, -2
-; CI-NEXT:    v_mov_b32_e32 v6, 0x3ff00000
+; CI-NEXT:    s_mov_b32 s8, 0
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
-; CI-NEXT:    v_mov_b32_e32 v4, s7
-; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    v_bfi_b32 v4, s2, v6, v4
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
-; CI-NEXT:    v_mov_b32_e32 v2, 0
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; CI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[4:5]
-; CI-NEXT:    v_mov_b32_e32 v7, s5
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; CI-NEXT:    v_bfi_b32 v6, s2, v6, v7
-; CI-NEXT:    v_cndmask_b32_e32 v1, 0, v6, vcc
-; CI-NEXT:    v_mov_b32_e32 v0, 0
-; CI-NEXT:    v_add_f64 v[0:1], v[4:5], v[0:1]
+; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
+; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    s_cselect_b32 s9, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
+; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[8:9]
+; CI-NEXT:    s_cselect_b32 s9, s2, 0
+; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[8:9]
 ; CI-NEXT:    s_mov_b32 s2, -1
 ; CI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; CI-NEXT:    s_endpgm
@@ -229,92 +222,92 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x11
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s13, 0xfffff
-; SI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bfe_u32 s12, s7, 0xb0014
-; SI-NEXT:    s_add_i32 s16, s12, 0xfffffc01
 ; SI-NEXT:    s_mov_b32 s12, s2
-; SI-NEXT:    s_lshr_b64 s[14:15], s[12:13], s16
+; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s3, s7, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[14:15], s[12:13], s3
+; SI-NEXT:    s_and_b32 s16, s7, 0x80000000
 ; SI-NEXT:    s_andn2_b64 s[14:15], s[6:7], s[14:15]
-; SI-NEXT:    s_and_b32 s17, s7, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s16, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s14, 0, s14
-; SI-NEXT:    s_cselect_b32 s15, s17, s15
-; SI-NEXT:    s_cmp_gt_i32 s16, 51
+; SI-NEXT:    s_cselect_b32 s15, s16, s15
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s14, s6, s14
 ; SI-NEXT:    s_cselect_b32 s15, s7, s15
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s15
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_brev_b32 s16, -2
-; SI-NEXT:    v_mov_b32_e32 v2, s7
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    v_bfi_b32 v2, s16, v8, v2
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    s_bfe_u32 s6, s5, 0xb0014
-; SI-NEXT:    v_add_f64 v[2:3], s[14:15], v[0:1]
-; SI-NEXT:    s_add_i32 s14, s6, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s14
+; SI-NEXT:    s_or_b32 s3, s16, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s3
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
-; SI-NEXT:    s_and_b32 s15, s5, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s14, 0
+; SI-NEXT:    s_and_b32 s16, s5, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
-; SI-NEXT:    s_cselect_b32 s7, s15, s7
-; SI-NEXT:    s_cmp_gt_i32 s14, 51
+; SI-NEXT:    s_cselect_b32 s7, s16, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s6, s4, s6
 ; SI-NEXT:    s_cselect_b32 s7, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
 ; SI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[0:1]
-; SI-NEXT:    s_bfe_u32 s4, s11, 0xb0014
-; SI-NEXT:    s_add_i32 s14, s4, 0xfffffc01
-; SI-NEXT:    v_mov_b32_e32 v4, s5
-; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s14
+; SI-NEXT:    s_or_b32 s3, s16, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
+; SI-NEXT:    v_add_f64 v[2:3], s[14:15], v[4:5]
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    s_bfe_u32 s3, s11, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s3
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[10:11], s[4:5]
-; SI-NEXT:    s_and_b32 s15, s11, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s14, 0
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
+; SI-NEXT:    s_and_b32 s14, s11, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s15, s5
-; SI-NEXT:    s_cmp_gt_i32 s14, 51
-; SI-NEXT:    v_bfi_b32 v4, s16, v8, v4
+; SI-NEXT:    s_cselect_b32 s5, s14, s5
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s4, s10, s4
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
 ; SI-NEXT:    s_cselect_b32 s5, s11, s5
-; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    v_mov_b32_e32 v5, s5
-; SI-NEXT:    v_add_f64 v[4:5], s[10:11], -v[4:5]
-; SI-NEXT:    v_mov_b32_e32 v6, s11
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    v_bfi_b32 v6, s16, v8, v6
-; SI-NEXT:    v_cndmask_b32_e32 v5, 0, v6, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_add_f64 v[6:7], s[10:11], -v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[4:5]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s14, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    s_bfe_u32 s3, s9, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s3
+; SI-NEXT:    s_andn2_b64 s[6:7], s[8:9], s[6:7]
+; SI-NEXT:    s_and_b32 s10, s9, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s10, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s6, s8, s6
+; SI-NEXT:    s_cselect_b32 s7, s9, s7
+; SI-NEXT:    v_mov_b32_e32 v6, s6
+; SI-NEXT:    v_mov_b32_e32 v7, s7
+; SI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[6:7]
 ; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[4:5]
-; SI-NEXT:    s_bfe_u32 s4, s9, 0xb0014
-; SI-NEXT:    s_add_i32 s10, s4, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s10
-; SI-NEXT:    s_andn2_b64 s[4:5], s[8:9], s[4:5]
-; SI-NEXT:    s_and_b32 s11, s9, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s10, 0
-; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s11, s5
-; SI-NEXT:    s_cmp_gt_i32 s10, 51
-; SI-NEXT:    s_cselect_b32 s4, s8, s4
-; SI-NEXT:    s_cselect_b32 s5, s9, s5
-; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    v_mov_b32_e32 v5, s5
-; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[4:5]
-; SI-NEXT:    v_mov_b32_e32 v9, s9
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    v_bfi_b32 v8, s16, v8, v9
-; SI-NEXT:    v_cndmask_b32_e32 v5, 0, v8, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, 0
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    v_add_f64 v[4:5], s[4:5], v[4:5]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s10, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[4:5]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[0:1]
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -322,44 +315,43 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; CI-LABEL: round_v4f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x11
-; CI-NEXT:    s_brev_b32 s12, -2
-; CI-NEXT:    v_mov_b32_e32 v12, 0x3ff00000
+; CI-NEXT:    s_mov_b32 s12, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
-; CI-NEXT:    v_mov_b32_e32 v4, s7
+; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
 ; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    v_bfi_b32 v4, s12, v12, v4
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
-; CI-NEXT:    v_trunc_f64_e32 v[8:9], s[4:5]
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
-; CI-NEXT:    v_mov_b32_e32 v2, 0
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; CI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[8:9]
-; CI-NEXT:    v_mov_b32_e32 v4, s5
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; CI-NEXT:    v_bfi_b32 v4, s12, v12, v4
-; CI-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v10, s11
-; CI-NEXT:    v_add_f64 v[6:7], s[10:11], -v[4:5]
-; CI-NEXT:    v_bfi_b32 v10, s12, v12, v10
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
-; CI-NEXT:    v_mov_b32_e32 v6, 0
-; CI-NEXT:    v_cndmask_b32_e32 v7, 0, v10, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[10:11], s[8:9]
-; CI-NEXT:    v_add_f64 v[6:7], v[4:5], v[6:7]
-; CI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[10:11]
-; CI-NEXT:    v_mov_b32_e32 v13, s9
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; CI-NEXT:    v_bfi_b32 v12, s12, v12, v13
-; CI-NEXT:    v_cndmask_b32_e32 v5, 0, v12, vcc
-; CI-NEXT:    v_mov_b32_e32 v4, 0
-; CI-NEXT:    v_mov_b32_e32 v0, 0
-; CI-NEXT:    v_add_f64 v[4:5], v[10:11], v[4:5]
+; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    s_cselect_b32 s13, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[10:11]
+; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
+; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[12:13]
+; CI-NEXT:    s_cselect_b32 s13, s2, 0
+; CI-NEXT:    v_add_f64 v[8:9], s[10:11], -v[6:7]
+; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[12:13]
+; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[8:9]
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s11, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[4:5]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    s_cselect_b32 s13, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s9, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_add_f64 v[6:7], v[6:7], s[12:13]
+; CI-NEXT:    s_cselect_b32 s13, s2, 0
+; CI-NEXT:    v_add_f64 v[4:5], v[4:5], s[12:13]
 ; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    v_add_f64 v[0:1], v[8:9], v[0:1]
 ; CI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; CI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; CI-NEXT:    s_endpgm
@@ -374,174 +366,174 @@ define amdgpu_kernel void @round_v8f64(ptr addrspace(1) %out, <8 x double> %in)
 ; SI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x19
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s21, 0xfffff
-; SI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bfe_u32 s20, s7, 0xb0014
-; SI-NEXT:    s_add_i32 s24, s20, 0xfffffc01
 ; SI-NEXT:    s_mov_b32 s20, s2
-; SI-NEXT:    s_lshr_b64 s[22:23], s[20:21], s24
+; SI-NEXT:    v_mov_b32_e32 v8, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s3, s7, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[22:23], s[20:21], s3
+; SI-NEXT:    s_and_b32 s24, s7, 0x80000000
 ; SI-NEXT:    s_andn2_b64 s[22:23], s[6:7], s[22:23]
-; SI-NEXT:    s_and_b32 s25, s7, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s24, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s22, 0, s22
-; SI-NEXT:    s_cselect_b32 s23, s25, s23
-; SI-NEXT:    s_cmp_gt_i32 s24, 51
+; SI-NEXT:    s_cselect_b32 s23, s24, s23
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s22, s6, s22
 ; SI-NEXT:    s_cselect_b32 s23, s7, s23
 ; SI-NEXT:    v_mov_b32_e32 v0, s22
 ; SI-NEXT:    v_mov_b32_e32 v1, s23
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_brev_b32 s6, -2
-; SI-NEXT:    v_mov_b32_e32 v2, s7
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    v_bfi_b32 v2, s6, v8, v2
-; SI-NEXT:    s_bfe_u32 s7, s5, 0xb0014
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    s_addk_i32 s7, 0xfc01
-; SI-NEXT:    v_add_f64 v[2:3], s[22:23], v[0:1]
-; SI-NEXT:    s_lshr_b64 s[22:23], s[20:21], s7
-; SI-NEXT:    s_andn2_b64 s[22:23], s[4:5], s[22:23]
+; SI-NEXT:    s_or_b32 s3, s24, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
 ; SI-NEXT:    s_and_b32 s24, s5, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
-; SI-NEXT:    s_cselect_b32 s22, 0, s22
-; SI-NEXT:    s_cselect_b32 s23, s24, s23
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s22, s4, s22
-; SI-NEXT:    s_cselect_b32 s23, s5, s23
-; SI-NEXT:    v_mov_b32_e32 v0, s22
-; SI-NEXT:    v_mov_b32_e32 v1, s23
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s24, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s6, s4, s6
+; SI-NEXT:    s_cselect_b32 s7, s5, s7
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
 ; SI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v4, s5
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; SI-NEXT:    s_bfe_u32 s4, s11, 0xb0014
-; SI-NEXT:    v_bfi_b32 v4, s6, v8, v4
-; SI-NEXT:    s_add_i32 s7, s4, 0xfffffc01
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s7
-; SI-NEXT:    v_add_f64 v[0:1], s[22:23], v[0:1]
+; SI-NEXT:    s_or_b32 s3, s24, 0x3ff00000
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
+; SI-NEXT:    v_add_f64 v[2:3], s[22:23], v[8:9]
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s11, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[10:11], s[4:5]
 ; SI-NEXT:    s_and_b32 s22, s11, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
 ; SI-NEXT:    s_cselect_b32 s5, s22, s5
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s4, s10, s4
 ; SI-NEXT:    s_cselect_b32 s5, s11, s5
-; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    v_mov_b32_e32 v5, s5
-; SI-NEXT:    v_add_f64 v[4:5], s[10:11], -v[4:5]
-; SI-NEXT:    v_mov_b32_e32 v6, s11
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    v_bfi_b32 v6, s6, v8, v6
-; SI-NEXT:    v_cndmask_b32_e32 v5, 0, v6, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, 0
-; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[4:5]
-; SI-NEXT:    s_bfe_u32 s4, s9, 0xb0014
-; SI-NEXT:    s_add_i32 s7, s4, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s7
-; SI-NEXT:    s_andn2_b64 s[4:5], s[8:9], s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_add_f64 v[4:5], s[10:11], -v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[4:5]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s22, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s9, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_andn2_b64 s[6:7], s[8:9], s[6:7]
 ; SI-NEXT:    s_and_b32 s10, s9, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
-; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s10, s5
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s4, s8, s4
-; SI-NEXT:    s_cselect_b32 s5, s9, s5
-; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    v_mov_b32_e32 v5, s5
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s10, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s6, s8, s6
+; SI-NEXT:    s_cselect_b32 s7, s9, s7
+; SI-NEXT:    v_mov_b32_e32 v4, s6
+; SI-NEXT:    v_mov_b32_e32 v5, s7
 ; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[4:5]
-; SI-NEXT:    v_mov_b32_e32 v9, s9
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    v_bfi_b32 v9, s6, v8, v9
-; SI-NEXT:    v_cndmask_b32_e32 v5, 0, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, 0
-; SI-NEXT:    v_add_f64 v[4:5], s[4:5], v[4:5]
-; SI-NEXT:    s_bfe_u32 s4, s15, 0xb0014
-; SI-NEXT:    s_add_i32 s7, s4, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s7
+; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[4:5]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s10, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s15, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[14:15], s[4:5]
 ; SI-NEXT:    s_and_b32 s8, s15, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
 ; SI-NEXT:    s_cselect_b32 s5, s8, s5
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s5, s15, s5
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s4, s14, s4
-; SI-NEXT:    v_mov_b32_e32 v10, s5
-; SI-NEXT:    v_mov_b32_e32 v9, s4
-; SI-NEXT:    v_add_f64 v[9:10], s[14:15], -v[9:10]
-; SI-NEXT:    v_mov_b32_e32 v11, s15
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[9:10]|, 0.5
-; SI-NEXT:    v_bfi_b32 v11, s6, v8, v11
-; SI-NEXT:    v_cndmask_b32_e32 v10, 0, v11, vcc
-; SI-NEXT:    v_mov_b32_e32 v9, 0
-; SI-NEXT:    v_add_f64 v[10:11], s[4:5], v[9:10]
-; SI-NEXT:    s_bfe_u32 s4, s13, 0xb0014
-; SI-NEXT:    s_add_i32 s7, s4, 0xfffffc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s7
-; SI-NEXT:    s_andn2_b64 s[4:5], s[12:13], s[4:5]
+; SI-NEXT:    s_cselect_b32 s5, s15, s5
+; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_mov_b32_e32 v5, s5
+; SI-NEXT:    v_add_f64 v[10:11], s[14:15], -v[4:5]
+; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[10:11]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s13, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_andn2_b64 s[6:7], s[12:13], s[6:7]
 ; SI-NEXT:    s_and_b32 s8, s13, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s8, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s13, s7
+; SI-NEXT:    s_cselect_b32 s6, s12, s6
+; SI-NEXT:    v_mov_b32_e32 v11, s7
+; SI-NEXT:    v_mov_b32_e32 v10, s6
+; SI-NEXT:    v_add_f64 v[10:11], s[12:13], -v[10:11]
+; SI-NEXT:    v_add_f64 v[12:13], s[4:5], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[10:11]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s19, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
+; SI-NEXT:    s_andn2_b64 s[4:5], s[18:19], s[4:5]
+; SI-NEXT:    s_and_b32 s8, s19, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
 ; SI-NEXT:    s_cselect_b32 s5, s8, s5
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s5, s13, s5
-; SI-NEXT:    s_cselect_b32 s4, s12, s4
-; SI-NEXT:    s_bfe_u32 s7, s19, 0xb0014
-; SI-NEXT:    s_addk_i32 s7, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[8:9], s[20:21], s7
-; SI-NEXT:    v_mov_b32_e32 v13, s5
-; SI-NEXT:    s_andn2_b64 s[8:9], s[18:19], s[8:9]
-; SI-NEXT:    s_and_b32 s10, s19, 0x80000000
-; SI-NEXT:    v_mov_b32_e32 v12, s4
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
-; SI-NEXT:    v_add_f64 v[12:13], s[12:13], -v[12:13]
-; SI-NEXT:    s_cselect_b32 s8, 0, s8
-; SI-NEXT:    s_cselect_b32 s9, s10, s9
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s9, s19, s9
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[12:13]|, 0.5
-; SI-NEXT:    s_cselect_b32 s8, s18, s8
-; SI-NEXT:    v_mov_b32_e32 v13, s9
-; SI-NEXT:    v_mov_b32_e32 v12, s8
-; SI-NEXT:    v_mov_b32_e32 v9, s13
-; SI-NEXT:    v_add_f64 v[12:13], s[18:19], -v[12:13]
-; SI-NEXT:    v_bfi_b32 v9, s6, v8, v9
-; SI-NEXT:    v_cndmask_b32_e32 v17, 0, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v9, s19
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[12:13]|, 0.5
-; SI-NEXT:    v_bfi_b32 v9, s6, v8, v9
-; SI-NEXT:    s_bfe_u32 s7, s17, 0xb0014
-; SI-NEXT:    v_cndmask_b32_e32 v13, 0, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v12, 0
-; SI-NEXT:    s_addk_i32 s7, 0xfc01
-; SI-NEXT:    v_add_f64 v[14:15], s[8:9], v[12:13]
-; SI-NEXT:    s_lshr_b64 s[8:9], s[20:21], s7
-; SI-NEXT:    s_andn2_b64 s[8:9], s[16:17], s[8:9]
-; SI-NEXT:    s_and_b32 s10, s17, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
-; SI-NEXT:    s_cselect_b32 s8, 0, s8
-; SI-NEXT:    s_cselect_b32 s9, s10, s9
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
-; SI-NEXT:    s_cselect_b32 s9, s17, s9
-; SI-NEXT:    s_cselect_b32 s8, s16, s8
-; SI-NEXT:    v_mov_b32_e32 v13, s9
-; SI-NEXT:    v_mov_b32_e32 v12, s8
-; SI-NEXT:    v_add_f64 v[12:13], s[16:17], -v[12:13]
-; SI-NEXT:    v_mov_b32_e32 v9, s17
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[12:13]|, 0.5
-; SI-NEXT:    v_bfi_b32 v8, s6, v8, v9
-; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v8, vcc
-; SI-NEXT:    v_mov_b32_e32 v8, 0
-; SI-NEXT:    v_mov_b32_e32 v16, 0
-; SI-NEXT:    v_add_f64 v[12:13], s[8:9], v[8:9]
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s5, s19, s5
+; SI-NEXT:    s_cselect_b32 s4, s18, s4
+; SI-NEXT:    v_mov_b32_e32 v11, s5
+; SI-NEXT:    v_mov_b32_e32 v10, s4
+; SI-NEXT:    v_add_f64 v[14:15], s[18:19], -v[10:11]
+; SI-NEXT:    v_add_f64 v[10:11], s[6:7], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[14:15]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_bfe_u32 s3, s17, 0xb0014
+; SI-NEXT:    s_addk_i32 s3, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_andn2_b64 s[6:7], s[16:17], s[6:7]
+; SI-NEXT:    s_and_b32 s8, s17, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s8, s7
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s17, s7
+; SI-NEXT:    s_cselect_b32 s6, s16, s6
+; SI-NEXT:    v_mov_b32_e32 v15, s7
+; SI-NEXT:    v_mov_b32_e32 v14, s6
+; SI-NEXT:    v_add_f64 v[14:15], s[16:17], -v[14:15]
+; SI-NEXT:    v_add_f64 v[16:17], s[4:5], v[8:9]
+; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[14:15]|, 0.5
+; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    v_add_f64 v[14:15], s[6:7], v[8:9]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    v_add_f64 v[8:9], s[4:5], v[16:17]
-; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:32
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -549,76 +541,75 @@ define amdgpu_kernel void @round_v8f64(ptr addrspace(1) %out, <8 x double> %in)
 ; CI-LABEL: round_v8f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x19
-; CI-NEXT:    s_brev_b32 s20, -2
-; CI-NEXT:    v_mov_b32_e32 v20, 0x3ff00000
+; CI-NEXT:    s_mov_b32 s20, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
-; CI-NEXT:    v_mov_b32_e32 v4, s7
-; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    v_bfi_b32 v4, s20, v20, v4
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
-; CI-NEXT:    v_mov_b32_e32 v2, 0
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; CI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[4:5]
-; CI-NEXT:    v_mov_b32_e32 v6, s5
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[0:1]|, 0.5
-; CI-NEXT:    v_bfi_b32 v6, s20, v20, v6
-; CI-NEXT:    v_cndmask_b32_e32 v1, 0, v6, vcc
+; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
+; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
 ; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[10:11]
-; CI-NEXT:    v_mov_b32_e32 v0, 0
-; CI-NEXT:    v_add_f64 v[0:1], v[4:5], v[0:1]
-; CI-NEXT:    v_add_f64 v[4:5], s[10:11], -v[6:7]
-; CI-NEXT:    v_mov_b32_e32 v8, s11
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; CI-NEXT:    v_bfi_b32 v8, s20, v20, v8
-; CI-NEXT:    v_cndmask_b32_e32 v5, 0, v8, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[8:9], s[8:9]
-; CI-NEXT:    v_mov_b32_e32 v4, 0
-; CI-NEXT:    v_add_f64 v[6:7], v[6:7], v[4:5]
-; CI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[8:9]
-; CI-NEXT:    v_mov_b32_e32 v10, s9
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; CI-NEXT:    v_bfi_b32 v10, s20, v20, v10
-; CI-NEXT:    v_cndmask_b32_e32 v5, 0, v10, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[10:11], s[14:15]
-; CI-NEXT:    v_mov_b32_e32 v4, 0
-; CI-NEXT:    v_add_f64 v[4:5], v[8:9], v[4:5]
-; CI-NEXT:    v_add_f64 v[8:9], s[14:15], -v[10:11]
-; CI-NEXT:    v_mov_b32_e32 v12, s15
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[8:9]|, 0.5
-; CI-NEXT:    v_bfi_b32 v12, s20, v20, v12
-; CI-NEXT:    v_trunc_f64_e32 v[16:17], s[12:13]
-; CI-NEXT:    v_cndmask_b32_e32 v9, 0, v12, vcc
-; CI-NEXT:    v_mov_b32_e32 v8, 0
-; CI-NEXT:    v_add_f64 v[10:11], v[10:11], v[8:9]
-; CI-NEXT:    v_add_f64 v[8:9], s[12:13], -v[16:17]
-; CI-NEXT:    v_mov_b32_e32 v12, s13
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[8:9]|, 0.5
-; CI-NEXT:    v_bfi_b32 v12, s20, v20, v12
-; CI-NEXT:    v_cndmask_b32_e32 v9, 0, v12, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[12:13], s[18:19]
-; CI-NEXT:    v_mov_b32_e32 v18, s19
-; CI-NEXT:    v_add_f64 v[14:15], s[18:19], -v[12:13]
-; CI-NEXT:    v_bfi_b32 v18, s20, v20, v18
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[14:15]|, 0.5
-; CI-NEXT:    v_mov_b32_e32 v14, 0
-; CI-NEXT:    v_cndmask_b32_e32 v15, 0, v18, vcc
-; CI-NEXT:    v_trunc_f64_e32 v[18:19], s[16:17]
-; CI-NEXT:    v_add_f64 v[14:15], v[12:13], v[14:15]
-; CI-NEXT:    v_add_f64 v[12:13], s[16:17], -v[18:19]
-; CI-NEXT:    v_mov_b32_e32 v21, s17
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[12:13]|, 0.5
-; CI-NEXT:    v_bfi_b32 v20, s20, v20, v21
-; CI-NEXT:    v_cndmask_b32_e32 v13, 0, v20, vcc
-; CI-NEXT:    v_mov_b32_e32 v12, 0
-; CI-NEXT:    v_mov_b32_e32 v8, 0
-; CI-NEXT:    v_add_f64 v[12:13], v[18:19], v[12:13]
+; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
+; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[20:21]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_add_f64 v[8:9], s[10:11], -v[6:7]
+; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[20:21]
+; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[8:9]
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s11, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[4:5]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; CI-NEXT:    v_trunc_f64_e32 v[8:9], s[14:15]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    s_and_b32 s2, s9, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[10:11], s[14:15], -v[8:9]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_trunc_f64_e32 v[12:13], s[12:13]
+; CI-NEXT:    v_add_f64 v[6:7], v[6:7], s[20:21]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[10:11]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s15, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[14:15], s[12:13], -v[12:13]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_add_f64 v[4:5], v[4:5], s[20:21]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[14:15]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s13, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_trunc_f64_e32 v[14:15], s[18:19]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_add_f64 v[10:11], v[8:9], s[20:21]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_add_f64 v[16:17], s[18:19], -v[14:15]
+; CI-NEXT:    v_add_f64 v[8:9], v[12:13], s[20:21]
+; CI-NEXT:    v_trunc_f64_e32 v[12:13], s[16:17]
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[16:17]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s19, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[16:17], s[16:17], -v[12:13]
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[16:17]|, 0.5
+; CI-NEXT:    s_and_b32 s2, s17, 0x80000000
+; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_add_f64 v[14:15], v[14:15], s[20:21]
+; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_add_f64 v[12:13], v[12:13], s[20:21]
 ; CI-NEXT:    s_mov_b32 s2, -1
-; CI-NEXT:    v_add_f64 v[8:9], v[16:17], v[8:9]
 ; CI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
 ; CI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
 ; CI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16


        


More information about the llvm-commits mailing list