[llvm] 5edc886 - [amdgpu] Add an enhanced conversion from i64 to f32.

Michael Liao via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 4 12:33:20 PDT 2021


Author: Michael Liao
Date: 2021-08-04T15:33:12-04:00
New Revision: 5edc886e900b12286ad86268461a3013c329118d

URL: https://github.com/llvm/llvm-project/commit/5edc886e900b12286ad86268461a3013c329118d
DIFF: https://github.com/llvm/llvm-project/commit/5edc886e900b12286ad86268461a3013c329118d.diff

LOG: [amdgpu] Add an enhanced conversion from i64 to f32.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D107187

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir
    llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d68488ccb342a..4fefc414e5746 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2453,87 +2453,119 @@ SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) cons
 
 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
                                                bool Signed) const {
-  // Unsigned
-  // cul2f(ulong u)
-  //{
-  //  uint lz = clz(u);
-  //  uint e = (u != 0) ? 127U + 63U - lz : 0;
-  //  u = (u << lz) & 0x7fffffffffffffffUL;
-  //  ulong t = u & 0xffffffffffUL;
-  //  uint v = (e << 23) | (uint)(u >> 40);
-  //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
-  //  return as_float(v + r);
-  //}
-  // Signed
-  // cl2f(long l)
-  //{
-  //  long s = l >> 63;
-  //  float r = cul2f((l + s) ^ s);
-  //  return s ? -r : r;
-  //}
+  // The regular method of converting a 64-bit integer to float roughly
+  // consists of two steps: normalization and rounding. In fact, after
+  // normalization, the conversion from a 64-bit integer to a float is
+  // essentially the same as the one from a 32-bit integer. The only
+  // difference is that it has more trailing bits to be rounded. To leverage
+  // the native 32-bit conversion, a 64-bit integer can be preprocessed to
+  // fit into a 32-bit integer and then converted into the correct float
+  // number. The basic steps for the unsigned conversion are illustrated in
+  // the following pseudo code:
+  //
+  // f32 uitofp(i64 u) {
+  //   i32 hi, lo = split(u);
+  //   // Only count the leading zeros in hi as we have native support for the
+  //   // conversion from i32 to f32. If hi is all 0s, the conversion is
+  //   // reduced to a 32-bit one automatically.
+  //   i32 shamt = clz(hi); // Return 32 if hi is all 0s.
+  //   u <<= shamt;
+  //   hi, lo = split(u);
+  //   hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
+  //   // Convert it as a 32-bit integer and scale the result back.
+  //   return uitofp(hi) * 2^(32 - shamt);
+  // }
+  //
+  // The signed conversion follows the same principle but uses 'ffbh_i32' to
+  // count the leading sign bits instead. If 'ffbh_i32' is not available, the
+  // absolute value is converted instead, followed by negation based on the
+  // sign bit.
 
   SDLoc SL(Op);
   SDValue Src = Op.getOperand(0);
-  SDValue L = Src;
-
-  SDValue S;
-  if (Signed) {
-    const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
-    S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
-
-    SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
-    L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
-  }
-
-  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
-                                   *DAG.getContext(), MVT::f32);
-
 
+  EVT SetCCVT =
+      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
   SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
-  SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
-  SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
-  LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
-
-  SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
-  SDValue E = DAG.getSelect(SL, MVT::i32,
-    DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
-    DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
-    ZeroI32);
-
-  SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
-    DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
-    DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
-
-  SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
-                          DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
-
-  SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
-                             U, DAG.getConstant(40, SL, MVT::i64));
 
-  SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
-    DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
-    DAG.getNode(ISD::TRUNCATE, SL, MVT::i32,  UShl));
-
-  SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
-  SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
-  SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
-
-  SDValue One = DAG.getConstant(1, SL, MVT::i32);
-
-  SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
-
-  SDValue R = DAG.getSelect(SL, MVT::i32,
-    RCmp,
-    One,
-    DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
-  R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
-  R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
-
-  if (!Signed)
-    return R;
-
-  SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
-  return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
+  SDValue Lo, Hi;
+  std::tie(Lo, Hi) = split64BitValue(Src, DAG);
+  SDValue Sign;
+  SDValue ShAmt;
+  if (Signed && Subtarget->isGCN()) {
+    // If Hi consists of just sign bits, i.e. Hi is 0 or -1, the sign bit in
+    // Lo also needs to be considered. However, only its MSB needs to be
+    // taken into account.
+    SDValue HasSameSign =
+        DAG.getSetCC(SL, SetCCVT, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
+                     ZeroI32, ISD::SETGE);
+    SDValue MaxShAmt = DAG.getSelect(SL, MVT::i32, HasSameSign,
+                                     DAG.getConstant(33, SL, MVT::i32),
+                                     DAG.getConstant(32, SL, MVT::i32));
+    // Count the leading sign bits.
+    ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
+    ShAmt = DAG.getSelect(SL, MVT::i32,
+                          DAG.getSetCC(SL, SetCCVT, ShAmt,
+                                       DAG.getAllOnesConstant(SL, MVT::i32),
+                                       ISD::SETNE),
+                          ShAmt, MaxShAmt);
+    // The shift amount for signed integers is [1, 33].
+    // Unlike the unsigned conversion, the shift amount is one bit less in
+    // order to preserve the sign bit.
+    ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
+                        DAG.getConstant(1, SL, MVT::i32));
+  } else {
+    if (Signed) {
+      // Without 'ffbh_i32', only leading zeros can be counted. Take the
+      // absolute value first.
+      Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
+                         DAG.getConstant(63, SL, MVT::i64));
+      SDValue Abs =
+          DAG.getNode(ISD::XOR, SL, MVT::i64,
+                      DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
+      std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
+    }
+    // Count the leading zeros.
+    ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
+    // The shift amount for signed integers is [0, 32].
+  }
+  // Normalize the given 64-bit integer.
+  SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
+  // Split it again.
+  std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
+  // Calculate the adjust bit for rounding.
+  SDValue Adjust = DAG.getSelect(
+      SL, MVT::i32, DAG.getSetCC(SL, SetCCVT, Lo, ZeroI32, ISD::SETNE),
+      DAG.getConstant(1, SL, MVT::i32), ZeroI32);
+  // Get the 32-bit normalized integer.
+  Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
+  // Convert the normalized 32-bit integer into f32.
+  unsigned Opc =
+      (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
+  SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
+
+  // Finally, we need to scale back the converted floating-point number, as
+  // the original 64-bit integer was converted as a 32-bit one.
+  ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
+                      ShAmt);
+  // On GCN, use LDEXP directly.
+  if (Subtarget->isGCN())
+    return DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f32, FVal, ShAmt);
+
+  // Otherwise, align 'ShAmt' with the exponent field and add it into the
+  // exponent directly to emulate the multiplication by 2^ShAmt. The 8-bit
+  // exponent field has enough headroom to avoid overflowing into the sign
+  // bit.
+  SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
+                            DAG.getConstant(23, SL, MVT::i32));
+  SDValue IVal =
+      DAG.getNode(ISD::ADD, SL, MVT::i32,
+                  DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
+  if (Signed) {
+    // Set the sign bit.
+    Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
+                       DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
+                       DAG.getConstant(31, SL, MVT::i32));
+    IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
+  }
+  return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
 }
 
 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,

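For reference, the unsigned pseudo code in the comment above can be modeled
by a small standalone C++ sketch. This is an illustration only, not part of
the patch; 'uitofp_i64_to_f32' is a hypothetical name, and it assumes the
GCC/Clang '__builtin_clz' builtin and the default round-to-nearest-even mode:

  #include <cmath>
  #include <cstdint>

  float uitofp_i64_to_f32(uint64_t u) {
    uint32_t hi = uint32_t(u >> 32);
    // Count the leading zeros in hi only; 32 if hi is all 0s.
    uint32_t shamt = hi ? uint32_t(__builtin_clz(hi)) : 32;
    u <<= shamt; // Normalize: a non-zero input gets its MSB into bit 63.
    hi = uint32_t(u >> 32);
    uint32_t lo = uint32_t(u);
    hi |= (lo != 0); // Fold the shifted-out bits into a sticky rounding bit.
    // Convert as a 32-bit integer and scale back; scaling by a power of two
    // is exact, so the result remains correctly rounded.
    return ldexpf(float(hi), 32 - int(shamt));
  }

For example, 2^32 + 1 normalizes to hi = 0x80000001, which rounds back down
to 2^31 under round-to-nearest-even and scales to 0x1p32f, the correctly
rounded result.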
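The non-GCN tail that emulates LDEXP can be sketched the same way. A hedged
model, assuming IEEE-754 binary32 and inputs as produced by the code above
(the value is zero or normal, and the scaled exponent stays within the 8-bit
field); 'emulated_ldexp' is a hypothetical helper name:

  #include <cstdint>
  #include <cstring>

  float emulated_ldexp(float f, int32_t e) {
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits)); // Bitcast, as in the lowering.
    bits += uint32_t(e) << 23;            // Add e into the exponent field.
    std::memcpy(&f, &bits, sizeof(bits));
    return f;
  }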
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index ddd8eac9adf9d..366a555a08baf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -812,10 +812,9 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
 
   // TODO: Split s1->s64 during regbankselect for VALU.
   auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
-    .legalFor({{S32, S32}, {S64, S32}, {S16, S32}})
-    .lowerFor({{S32, S64}})
-    .lowerIf(typeIs(1, S1))
-    .customFor({{S64, S64}});
+                    .legalFor({{S32, S32}, {S64, S32}, {S16, S32}})
+                    .lowerIf(typeIs(1, S1))
+                    .customFor({{S32, S64}, {S64, S64}});
   if (ST.has16BitInsts())
     IToFP.legalFor({{S16, S16}});
   IToFP.clampScalar(1, S32, S64)
@@ -2063,24 +2062,60 @@ bool AMDGPULegalizerInfo::legalizeITOFP(
 
   const LLT S64 = LLT::scalar(64);
   const LLT S32 = LLT::scalar(32);
+  const LLT S1 = LLT::scalar(1);
 
-  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
+  assert(MRI.getType(Src) == S64);
 
   auto Unmerge = B.buildUnmerge({S32, S32}, Src);
+  auto ThirtyTwo = B.buildConstant(S32, 32);
 
-  auto CvtHi = Signed ?
-    B.buildSITOFP(S64, Unmerge.getReg(1)) :
-    B.buildUITOFP(S64, Unmerge.getReg(1));
+  if (MRI.getType(Dst) == S64) {
+    auto CvtHi = Signed ? B.buildSITOFP(S64, Unmerge.getReg(1))
+                        : B.buildUITOFP(S64, Unmerge.getReg(1));
 
-  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));
+    auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));
+    auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
+                     .addUse(CvtHi.getReg(0))
+                     .addUse(ThirtyTwo.getReg(0));
 
-  auto ThirtyTwo = B.buildConstant(S32, 32);
-  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
-    .addUse(CvtHi.getReg(0))
-    .addUse(ThirtyTwo.getReg(0));
+    // TODO: Should this propagate fast-math-flags?
+    B.buildFAdd(Dst, LdExp, CvtLo);
+    MI.eraseFromParent();
+    return true;
+  }
 
-  // TODO: Should this propagate fast-math-flags?
-  B.buildFAdd(Dst, LdExp, CvtLo);
+  assert(MRI.getType(Dst) == S32);
+
+  auto Zero = B.buildConstant(S32, 0);
+  auto One = B.buildConstant(S32, 1);
+  auto AllOnes = B.buildConstant(S32, -1);
+
+  MachineInstrBuilder ShAmt;
+  if (Signed) {
+    auto ThirtyThree = B.buildConstant(S32, 33);
+    auto X = B.buildXor(S32, Unmerge.getReg(0), Unmerge.getReg(1));
+    auto HasSameSign = B.buildICmp(CmpInst::ICMP_SGE, S1, X, Zero);
+    auto MaxShAmt = B.buildSelect(S32, HasSameSign, ThirtyThree, ThirtyTwo);
+    auto LS = B.buildIntrinsic(Intrinsic::amdgcn_sffbh, {S32},
+                               /*HasSideEffects=*/false)
+                  .addUse(Unmerge.getReg(1));
+    auto NotAllSameBits = B.buildICmp(CmpInst::ICMP_NE, S1, LS, AllOnes);
+    auto LS2 = B.buildSelect(S32, NotAllSameBits, LS, MaxShAmt);
+    ShAmt = B.buildSub(S32, LS2, One);
+  } else
+    ShAmt = B.buildCTLZ(S32, Unmerge.getReg(1));
+  auto Norm = B.buildShl(S64, Src, ShAmt);
+  auto Unmerge2 = B.buildUnmerge({S32, S32}, Norm);
+  auto NotAllZeros =
+      B.buildICmp(CmpInst::ICMP_NE, S1, Unmerge2.getReg(0), Zero);
+  auto Adjust = B.buildSelect(S32, NotAllZeros, One, Zero);
+  auto Norm2 = B.buildOr(S32, Unmerge2.getReg(1), Adjust);
+  auto FVal = Signed ? B.buildSITOFP(S32, Norm2) : B.buildUITOFP(S32, Norm2);
+  auto Scale = B.buildSub(S32, ThirtyTwo, ShAmt);
+  B.buildIntrinsic(Intrinsic::amdgcn_ldexp, ArrayRef<Register>{Dst},
+                   /*HasSideEffects=*/false)
+      .addUse(FVal.getReg(0))
+      .addUse(Scale.getReg(0));
   MI.eraseFromParent();
   return true;
 }

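The signed shift-amount selection shared by the SelectionDAG and GlobalISel
paths can be modeled in the same fashion. 'sffbh' below is a hypothetical
stand-in for v_ffbh_i32 (llvm.amdgcn.sffbh): it returns the position of the
first bit that differs from the sign bit, counting from the MSB, or -1 when
all 32 bits equal the sign bit (x is 0 or -1):

  #include <cstdint>

  int32_t sffbh(int32_t x) {
    uint32_t u = x < 0 ? ~uint32_t(x) : uint32_t(x);
    return u ? int32_t(__builtin_clz(u)) : -1;
  }

  uint32_t signed_shamt(uint32_t lo, uint32_t hi) {
    // If hi is all sign bits, the MSB of lo decides whether one extra bit
    // can be shifted out: 33 when lo's MSB matches the sign, 32 otherwise.
    uint32_t max_shamt = (int32_t(lo ^ hi) >= 0) ? 33 : 32;
    int32_t ls = sffbh(int32_t(hi));
    uint32_t shamt = (ls != -1) ? uint32_t(ls) : max_shamt;
    // One bit less than the unsigned case so the sign bit is preserved.
    return shamt - 1;
  }

For -1 (lo = hi = 0xffffffff), sffbh returns -1 and the maximum of 33
applies, giving a shift of 32: the normalized high word is -1, SITOFP yields
-1.0f, and the scale factor is 2^0.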
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
index 660eb5bd537cc..a33e87975f1af 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
@@ -1082,65 +1082,37 @@ define float @v_test_sitofp_i64_byte_to_f32(i64 %arg0) {
 ; SI-LABEL: v_test_sitofp_i64_byte_to_f32:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    s_movk_i32 s6, 0xff
-; SI-NEXT:    v_and_b32_e32 v0, s6, v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, 0, v0
-; SI-NEXT:    v_ffbh_u32_e32 v2, v0
-; SI-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
-; SI-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
-; SI-NEXT:    v_ffbh_u32_e32 v3, v1
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; SI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, 0xbe
-; SI-NEXT:    v_sub_i32_e32 v4, vcc, v3, v2
-; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
-; SI-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v3
-; SI-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; SI-NEXT:    s_mov_b32 s4, 0
-; SI-NEXT:    v_and_b32_e32 v3, s6, v3
-; SI-NEXT:    s_movk_i32 s5, 0x80
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; SI-NEXT:    v_or_b32_e32 v0, v0, v1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
-; SI-NEXT:    v_and_b32_e32 v1, 1, v0
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT:    v_ffbh_i32_e32 v2, 0
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v2
+; SI-NEXT:    v_cndmask_b32_e32 v2, 33, v2, vcc
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, 1, v2
+; SI-NEXT:    v_lshl_b64 v[0:1], v[0:1], v2
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; SI-NEXT:    v_sub_i32_e32 v1, vcc, 32, v2
+; SI-NEXT:    v_ldexp_f32_e32 v0, v0, v1
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_test_sitofp_i64_byte_to_f32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    s_movk_i32 s6, 0xff
-; VI-NEXT:    v_and_b32_e32 v0, s6, v0
-; VI-NEXT:    v_add_u32_e32 v0, vcc, 0, v0
-; VI-NEXT:    v_ffbh_u32_e32 v2, v0
-; VI-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, 0, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
-; VI-NEXT:    v_ffbh_u32_e32 v3, v1
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; VI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, 0xbe
-; VI-NEXT:    v_sub_u32_e32 v4, vcc, v3, v2
-; VI-NEXT:    v_lshlrev_b64 v[2:3], v2, v[0:1]
-; VI-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; VI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v3
-; VI-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; VI-NEXT:    s_mov_b32 s4, 0
-; VI-NEXT:    v_and_b32_e32 v3, s6, v3
-; VI-NEXT:    s_movk_i32 s5, 0x80
-; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_and_b32_e32 v1, 1, v0
-; VI-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_cndmask_b32_e64 v1, v1, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT:    v_ffbh_i32_e32 v2, 0
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v2
+; VI-NEXT:    v_cndmask_b32_e32 v2, 33, v2, vcc
+; VI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    v_subrev_u32_e32 v2, vcc, 1, v2
+; VI-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
+; VI-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; VI-NEXT:    v_sub_u32_e32 v1, vcc, 32, v2
+; VI-NEXT:    v_ldexp_f32 v0, v0, v1
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %masked = and i64 %arg0, 255
   %itofp = sitofp i64 %masked to float
@@ -1151,61 +1123,16 @@ define float @v_test_uitofp_i64_byte_to_f32(i64 %arg0) {
 ; SI-LABEL: v_test_uitofp_i64_byte_to_f32:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    s_movk_i32 s4, 0xff
-; SI-NEXT:    v_and_b32_e32 v0, s4, v0
-; SI-NEXT:    v_ffbh_u32_e32 v2, v0
-; SI-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
-; SI-NEXT:    v_ffbh_u32_e32 v3, 0
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, 0, 0
-; SI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, 0
-; SI-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
-; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
-; SI-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; SI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v3
-; SI-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; SI-NEXT:    v_and_b32_e32 v3, s4, v3
-; SI-NEXT:    s_mov_b32 s4, 0
-; SI-NEXT:    s_movk_i32 s5, 0x80
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; SI-NEXT:    v_or_b32_e32 v0, v0, v1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
-; SI-NEXT:    v_and_b32_e32 v1, 1, v0
-; SI-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT:    v_ldexp_f32_e64 v0, v0, 0
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_test_uitofp_i64_byte_to_f32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    s_movk_i32 s4, 0xff
-; VI-NEXT:    v_and_b32_e32 v0, s4, v0
-; VI-NEXT:    v_ffbh_u32_e32 v2, v0
-; VI-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
-; VI-NEXT:    v_ffbh_u32_e32 v3, 0
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, 0, 0
-; VI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; VI-NEXT:    v_mov_b32_e32 v1, 0
-; VI-NEXT:    v_sub_u32_e32 v4, vcc, 0xbe, v2
-; VI-NEXT:    v_lshlrev_b64 v[2:3], v2, v[0:1]
-; VI-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; VI-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v3
-; VI-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; VI-NEXT:    v_and_b32_e32 v3, s4, v3
-; VI-NEXT:    s_mov_b32 s4, 0
-; VI-NEXT:    s_movk_i32 s5, 0x80
-; VI-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
-; VI-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_and_b32_e32 v1, 1, v0
-; VI-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
-; VI-NEXT:    v_cndmask_b32_e64 v1, v1, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT:    v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT:    v_ldexp_f32 v0, v0, 0
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %masked = and i64 %arg0, 255
   %itofp = uitofp i64 %masked to float

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir
index 53a90c318be8f..ccbf30096c85f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir
@@ -98,84 +98,52 @@ body: |
 
     ; GFX6-LABEL: name: test_sitofp_s64_to_s32
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
-    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX6: $vgpr0 = COPY [[SELECT3]](s32)
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX6: $vgpr0 = COPY [[INT1]](s32)
     ; GFX8-LABEL: name: test_sitofp_s64_to_s32
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
-    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX8: $vgpr0 = COPY [[SELECT3]](s32)
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX8: $vgpr0 = COPY [[INT1]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s32) = G_SITOFP %0
     $vgpr0 = COPY %1
@@ -190,18 +158,18 @@ body: |
     ; GFX6-LABEL: name: test_sitofp_s64_to_s64
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
     ; GFX6: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32)
     ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]]
     ; GFX6: $vgpr0_vgpr1 = COPY [[FADD]](s64)
     ; GFX8-LABEL: name: test_sitofp_s64_to_s64
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
     ; GFX8: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32)
     ; GFX8: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]]
     ; GFX8: $vgpr0_vgpr1 = COPY [[FADD]](s64)
@@ -450,86 +418,54 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; GFX6: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 33
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C]](s32)
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64)
-    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX6: $vgpr0 = COPY [[SELECT3]](s32)
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX6: $vgpr0 = COPY [[INT1]](s32)
     ; GFX8-LABEL: name: test_sitofp_s33_to_s32
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 33
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C]](s32)
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64)
-    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX8: $vgpr0 = COPY [[SELECT3]](s32)
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX8: $vgpr0 = COPY [[INT1]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s33) = G_TRUNC %0
     %2:_(s32) = G_SITOFP %1
@@ -544,86 +480,54 @@ body: |
 
     ; GFX6-LABEL: name: test_sitofp_s64_to_s16
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
-    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32)
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
     ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX8-LABEL: name: test_sitofp_s64_to_s16
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
-    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
-    ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32)
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]]
+    ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
     ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
@@ -641,153 +545,99 @@ body: |
     ; GFX6-LABEL: name: test_sitofp_v2s64_to_v2s16
     ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV]], [[C]](s32)
     ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
-    ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32)
-    ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+    ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT2]]
+    ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
-    ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV6]], [[UV8]]
-    ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV7]], [[UV9]], [[UADDO3]]
-    ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
-    ; GFX6: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[ASHR1]]
-    ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR1]](s64)
-    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF1]]
-    ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR1]](s64), [[C2]]
-    ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[SUB1]], [[C1]]
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[XOR1]], [[CTLZ_ZERO_UNDEF1]](s32)
-    ; GFX6: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C4]]
-    ; GFX6: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C5]]
-    ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C6]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT4]], [[C7]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
-    ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]]
-    ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C8]]
-    ; GFX6: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C8]]
-    ; GFX6: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C9]]
-    ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP6]](s1), [[AND5]], [[C1]]
-    ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C9]], [[SELECT5]]
-    ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT6]]
-    ; GFX6: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[ADD1]]
-    ; GFX6: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR1]](s64), [[C2]]
-    ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s1), [[FNEG1]], [[ADD1]]
-    ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT7]](s32)
+    ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]]
+    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR1]](s32), [[C1]]
+    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C4]], [[C]]
+    ; GFX6: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32)
+    ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT2]](s32), [[C3]]
+    ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[INT2]], [[SELECT3]]
+    ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SELECT4]], [[C2]]
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB2]](s32)
+    ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
+    ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]]
+    ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT5]]
+    ; GFX6: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32)
+    ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB2]]
+    ; GFX6: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32)
+    ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32)
     ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
     ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
-    ; GFX6: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C10]](s32)
-    ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]]
+    ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C5]](s32)
+    ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
     ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX6: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
     ; GFX8-LABEL: name: test_sitofp_v2s64_to_v2s16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV]], [[C]](s32)
     ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
-    ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
-    ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]]
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64)
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]]
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32)
-    ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]]
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]]
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]]
-    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32)
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+    ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]]
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+    ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT2]]
+    ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
+    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]]
+    ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
+    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
-    ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
-    ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV6]], [[UV8]]
-    ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV7]], [[UV9]], [[UADDO3]]
-    ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
-    ; GFX8: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[ASHR1]]
-    ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR1]](s64)
-    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF1]]
-    ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR1]](s64), [[C2]]
-    ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[SUB1]], [[C1]]
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[XOR1]], [[CTLZ_ZERO_UNDEF1]](s32)
-    ; GFX8: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C4]]
-    ; GFX8: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C5]]
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C6]](s32)
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT4]], [[C7]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
-    ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]]
-    ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C8]]
-    ; GFX8: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C8]]
-    ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C9]]
-    ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP6]](s1), [[AND5]], [[C1]]
-    ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C9]], [[SELECT5]]
-    ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT6]]
-    ; GFX8: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[ADD1]]
-    ; GFX8: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR1]](s64), [[C2]]
-    ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s1), [[FNEG1]], [[ADD1]]
-    ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT7]](s32)
+    ; GFX8: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]]
+    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR1]](s32), [[C1]]
+    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C4]], [[C]]
+    ; GFX8: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32)
+    ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT2]](s32), [[C3]]
+    ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[INT2]], [[SELECT3]]
+    ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SELECT4]], [[C2]]
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB2]](s32)
+    ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
+    ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]]
+    ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT5]]
+    ; GFX8: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32)
+    ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB2]]
+    ; GFX8: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32)
+    ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32)
     ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
     ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
-    ; GFX8: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C10]](s32)
-    ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]]
+    ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C5]](s32)
+    ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
     ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX8: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
     %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3

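For reference, the GFX6/GFX8 check lines above encode the new signed
lowering: split the i64, take the redundant leading sign bits of the high
word from llvm.amdgcn.sffbh (with a 32/33 fallback when the high word is
all sign bits, which the compare against -1 selects), pre-shift so a
single sign bit remains, fold any shifted-out low bits into a sticky bit,
convert the high word with the native signed i32 conversion, and scale the
result back with ldexp. The C model below is only an illustration of that
sequence; the helper name and the sffbh emulation are assumptions of mine,
not part of the patch:

#include <math.h>
#include <stdint.h>

/* Illustrative model of the signed i64 -> f32 lowering checked above. */
static float sitofp_i64_model(int64_t s) {
  uint32_t lo = (uint32_t)s;
  uint32_t hi = (uint32_t)((uint64_t)s >> 32);
  /* Assumed sffbh semantics: count of leading bits equal to the sign bit,
     or -1 when hi is all 0s or all 1s (matching the compare against -1 in
     the checks above). */
  int sffbh = (hi == 0u || hi == ~0u)
                  ? -1
                  : __builtin_clz((int32_t)hi < 0 ? ~hi : hi);
  /* Fallback: the value fits in 33 bits; pick 33 when lo extends the sign
     run (the value already fits in i32), else 32. */
  int sel = sffbh != -1 ? sffbh : ((int32_t)(lo ^ hi) >= 0 ? 33 : 32);
  int shamt = sel - 1;               /* keep exactly one sign bit */
  uint64_t u = (uint64_t)s << shamt;
  uint32_t newhi = (uint32_t)(u >> 32);
  if ((uint32_t)u != 0)
    newhi |= 1u;                     /* sticky bit drives the rounding */
  return ldexpf((float)(int32_t)newhi, 32 - shamt);
}
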
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir
index 214900c18ed96..5a96b2feabb38 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir
@@ -73,62 +73,40 @@ body: |
 
     ; GFX6-LABEL: name: test_uitofp_s64_to_s32
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
-    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: $vgpr0 = COPY [[ADD]](s32)
+    ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX6: $vgpr0 = COPY [[INT]](s32)
     ; GFX8-LABEL: name: test_uitofp_s64_to_s32
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
-    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: $vgpr0 = COPY [[ADD]](s32)
+    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX8: $vgpr0 = COPY [[INT]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s32) = G_UITOFP %0
     $vgpr0 = COPY %1
@@ -143,18 +121,18 @@ body: |
     ; GFX6-LABEL: name: test_uitofp_s64_to_s64
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV1]](s32)
     ; GFX6: [[UITOFP1:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX6: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s64), [[C]](s32)
     ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP1]]
     ; GFX6: $vgpr0_vgpr1 = COPY [[FADD]](s64)
     ; GFX8-LABEL: name: test_uitofp_s64_to_s64
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV1]](s32)
     ; GFX8: [[UITOFP1:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX8: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s64), [[C]](s32)
     ; GFX8: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP1]]
     ; GFX8: $vgpr0_vgpr1 = COPY [[FADD]](s64)
@@ -415,65 +393,43 @@ body: |
     ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934591
     ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s64)
-    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[AND]](s64), [[C2]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C5]]
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C6]](s32)
-    ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND2]](s64), [[C8]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND2]](s64), [[C8]]
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND3]], [[C1]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: $vgpr0 = COPY [[ADD]](s32)
+    ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C2]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[SELECT]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C2]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C3]], [[C2]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SELECT]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX6: $vgpr0 = COPY [[INT]](s32)
     ; GFX8-LABEL: name: test_uitofp_s33_to_s32
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
     ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934591
     ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s64)
-    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[AND]](s64), [[C2]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C5]]
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C6]](s32)
-    ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND2]](s64), [[C8]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND2]](s64), [[C8]]
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND3]], [[C1]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: $vgpr0 = COPY [[ADD]](s32)
+    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
+    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C2]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[SELECT]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C2]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C3]], [[C2]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SELECT]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX8: $vgpr0 = COPY [[INT]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s33) = G_TRUNC %0
     %2:_(s32) = G_UITOFP %1
@@ -488,64 +444,42 @@ body: |
 
     ; GFX6-LABEL: name: test_uitofp_s64_to_s16
     ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
-    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32)
+    ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32)
     ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
     ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX8-LABEL: name: test_uitofp_s64_to_s16
     ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
-    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32)
+    ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]]
+    ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32)
     ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
     ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s64) = COPY $vgpr0_vgpr1
@@ -563,111 +497,79 @@ body: |
     ; GFX6-LABEL: name: test_sitofp_v2s64_to_v2s16
     ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s64)
-    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[C1]]
-    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32)
-    ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s64)
-    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF1]]
-    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[C1]]
-    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SUB1]], [[C]]
-    ; GFX6: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[CTLZ_ZERO_UNDEF1]](s32)
-    ; GFX6: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C3]]
-    ; GFX6: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C4]]
-    ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C5]](s32)
-    ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT3]], [[C6]](s32)
-    ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
-    ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]]
-    ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C7]]
-    ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C7]]
-    ; GFX6: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C8]]
-    ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[AND5]], [[C]]
-    ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C8]], [[SELECT4]]
-    ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT5]]
-    ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD1]](s32)
+    ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+    ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV3]](s32)
+    ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C1]]
+    ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SELECT]](s32)
+    ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT1]]
+    ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32)
+    ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+    ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV7]](s32)
+    ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[C1]]
+    ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[CTLZ_ZERO_UNDEF1]]
+    ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SELECT2]](s32)
+    ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
+    ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]]
+    ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C2]], [[C1]]
+    ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT3]]
+    ; GFX6: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[OR1]](s32)
+    ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT2]]
+    ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP1]](s32), [[SUB1]](s32)
+    ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
     ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
-    ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C9]](s32)
-    ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]]
+    ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
+    ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
     ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX6: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
     ; GFX8-LABEL: name: test_sitofp_v2s64_to_v2s16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s64)
-    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190
-    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]]
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[C1]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]]
-    ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
-    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[CTLZ_ZERO_UNDEF]](s32)
-    ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]]
-    ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775
-    ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]]
-    ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32)
-    ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32)
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]]
-    ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]]
-    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]]
-    ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]]
-    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]]
-    ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]]
-    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32)
-    ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s64)
-    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF1]]
-    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[C1]]
-    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SUB1]], [[C]]
-    ; GFX8: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[CTLZ_ZERO_UNDEF1]](s32)
-    ; GFX8: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C3]]
-    ; GFX8: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C4]]
-    ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C5]](s32)
-    ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT3]], [[C6]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
-    ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]]
-    ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C7]]
-    ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C7]]
-    ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C8]]
-    ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[AND5]], [[C]]
-    ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C8]], [[SELECT4]]
-    ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT5]]
-    ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD1]](s32)
+    ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV3]](s32)
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C1]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]]
+    ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SELECT]](s32)
+    ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
+    ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]]
+    ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT1]]
+    ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32)
+    ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]]
+    ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32)
+    ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32)
+    ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+    ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV7]](s32)
+    ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[C1]]
+    ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[CTLZ_ZERO_UNDEF1]]
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SELECT2]](s32)
+    ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
+    ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]]
+    ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C2]], [[C1]]
+    ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT3]]
+    ; GFX8: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[OR1]](s32)
+    ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT2]]
+    ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP1]](s32), [[SUB1]](s32)
+    ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
     ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
     ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
-    ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C9]](s32)
-    ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]]
+    ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
+    ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
     ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX8: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
     %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3

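The unsigned checks above follow the same shape, but count plain leading
zeros of the high word (selecting 32 outright when it is zero, since
G_CTLZ_ZERO_UNDEF is undefined there) and finish with the native unsigned
conversion. A matching C sketch, again with a hypothetical helper name:

#include <math.h>
#include <stdint.h>

/* Illustrative model of the unsigned i64 -> f32 lowering checked above. */
static float uitofp_i64_model(uint64_t u) {
  uint32_t hi = (uint32_t)(u >> 32);
  /* __builtin_clz(0) is undefined, just like G_CTLZ_ZERO_UNDEF, hence the
     explicit select of 32. */
  int shamt = hi ? __builtin_clz(hi) : 32;
  u <<= shamt;
  uint32_t newhi = (uint32_t)(u >> 32);
  if ((uint32_t)u != 0)
    newhi |= 1u;                     /* sticky bit drives the rounding */
  return ldexpf((float)newhi, 32 - shamt);
}
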
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
index 279be8c658e9b..06c678028a1c9 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
@@ -7,89 +7,52 @@
 define amdgpu_kernel void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
 ; GFX6-LABEL: s_sint_to_fp_i64_to_f16:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NEXT:    s_mov_b32 s2, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_mov_b32 s0, s4
-; GFX6-NEXT:    s_mov_b32 s1, s5
-; GFX6-NEXT:    s_mov_b32 s11, s10
-; GFX6-NEXT:    s_add_u32 s4, s6, s10
-; GFX6-NEXT:    s_addc_u32 s5, s7, s10
-; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
-; GFX6-NEXT:    s_flbit_i32_b32 s6, s4
-; GFX6-NEXT:    s_flbit_i32_b32 s7, s5
-; GFX6-NEXT:    s_add_i32 s6, s6, 32
-; GFX6-NEXT:    s_and_b32 s10, 1, s10
-; GFX6-NEXT:    v_mov_b32_e32 v0, s7
-; GFX6-NEXT:    v_mov_b32_e32 v1, s6
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    s_mov_b32 s4, s0
+; GFX6-NEXT:    s_mov_b32 s5, s1
+; GFX6-NEXT:    s_xor_b32 s0, s2, s3
+; GFX6-NEXT:    s_flbit_i32 s8, s3
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[0:1], s0, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, s[0:1]
+; GFX6-NEXT:    v_mov_b32_e32 v1, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s8, -1
 ; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[4:5], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], s10, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[4:5]
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, -1, v2
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 33, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v1
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_sint_to_fp_i64_to_f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    v_mov_b32_e32 v0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_ashr_i32 s6, s3, 31
-; GFX8-NEXT:    s_add_u32 s2, s2, s6
-; GFX8-NEXT:    s_mov_b32 s7, s6
-; GFX8-NEXT:    s_addc_u32 s3, s3, s6
-; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
-; GFX8-NEXT:    s_flbit_i32_b32 s7, s2
-; GFX8-NEXT:    s_add_i32 s7, s7, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s8, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s7, s7, s8
-; GFX8-NEXT:    s_sub_i32 s8, 0xbe, s7
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s8, s8, 0
-; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s7
-; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s8, s8, 23
-; GFX8-NEXT:    s_or_b32 s7, s8, s7
-; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX8-NEXT:    s_and_b32 s8, s7, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
-; GFX8-NEXT:    s_cselect_b32 s2, s8, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s2, s7, s2
-; GFX8-NEXT:    s_and_b32 s3, 1, s6
-; GFX8-NEXT:    v_mov_b32_e32 v0, s2
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[2:3]
+; GFX8-NEXT:    s_xor_b32 s4, s2, s3
+; GFX8-NEXT:    s_cmp_gt_i32 s4, -1
+; GFX8-NEXT:    s_flbit_i32 s5, s3
+; GFX8-NEXT:    s_cselect_b32 s4, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s5, -1
+; GFX8-NEXT:    s_cselect_b32 s6, s5, s4
+; GFX8-NEXT:    s_add_i32 s4, s6, -1
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX8-NEXT:    s_sub_i32 s2, 33, s6
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, s2
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_store_short v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %result = sitofp i64 %in to half
@@ -105,44 +68,26 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX6-NEXT:    s_mov_b32 s6, 0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v2, 0
-; GFX6-NEXT:    s_mov_b32 s8, 1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
 ; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
-; GFX6-NEXT:    s_mov_b32 s9, s7
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 31, v4
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v0
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v4, v0, vcc
-; GFX6-NEXT:    v_and_b32_e32 v9, 1, v0
-; GFX6-NEXT:    v_xor_b32_e32 v4, v4, v0
-; GFX6-NEXT:    v_xor_b32_e32 v3, v3, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
-; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
-; GFX6-NEXT:    v_mov_b32_e32 v7, v5
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v0, v3, v4
+; GFX6-NEXT:    v_ffbh_i32_e32 v5, v4
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, -1, v0
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v5
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, 33, v0
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v3, v0
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX6-NEXT:    buffer_store_short v0, v[1:2], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
@@ -151,52 +96,33 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; GFX8-NEXT:    v_mov_b32_e32 v4, 0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 1, v0
-; GFX8-NEXT:    s_movk_i32 s6, 0xbe
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_mov_b32 s4, 1
-; GFX8-NEXT:    s_mov_b32 s5, s3
-; GFX8-NEXT:    v_mov_b32_e32 v6, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v1, v0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v0, vcc
-; GFX8-NEXT:    v_and_b32_e32 v7, 1, v0
-; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v0
-; GFX8-NEXT:    v_xor_b32_e32 v0, v3, v0
-; GFX8-NEXT:    v_ffbh_u32_e32 v2, v0
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
-; GFX8-NEXT:    v_ffbh_u32_e32 v3, v1
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v8, v3, v2, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v8, v[0:1]
-; GFX8-NEXT:    v_sub_u32_e32 v8, vcc, s6, v8
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v0, v2
-; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v8
-; GFX8-NEXT:    v_bfe_u32 v3, v3, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v3, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v5
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v6, v4, vcc
-; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    v_xor_b32_e32 v4, v1, v2
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v4
+; GFX8-NEXT:    v_ffbh_i32_e32 v5, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, -1, v4
+; GFX8-NEXT:    v_lshlrev_b64 v[1:2], v5, v[1:2]
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 33, v4
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v4
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; GFX8-NEXT:    flat_store_short v[0:1], v4
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -210,87 +136,50 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 define amdgpu_kernel void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
 ; GFX6-LABEL: s_sint_to_fp_i64_to_f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NEXT:    s_mov_b32 s2, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_mov_b32 s0, s4
-; GFX6-NEXT:    s_mov_b32 s1, s5
-; GFX6-NEXT:    s_mov_b32 s11, s10
-; GFX6-NEXT:    s_add_u32 s4, s6, s10
-; GFX6-NEXT:    s_addc_u32 s5, s7, s10
-; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
-; GFX6-NEXT:    s_flbit_i32_b32 s6, s4
-; GFX6-NEXT:    s_flbit_i32_b32 s7, s5
-; GFX6-NEXT:    s_add_i32 s6, s6, 32
-; GFX6-NEXT:    s_and_b32 s10, 1, s10
-; GFX6-NEXT:    v_mov_b32_e32 v0, s7
-; GFX6-NEXT:    v_mov_b32_e32 v1, s6
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    s_mov_b32 s4, s0
+; GFX6-NEXT:    s_mov_b32 s5, s1
+; GFX6-NEXT:    s_xor_b32 s0, s2, s3
+; GFX6-NEXT:    s_flbit_i32 s8, s3
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[0:1], s0, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, s[0:1]
+; GFX6-NEXT:    v_mov_b32_e32 v1, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s8, -1
 ; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[4:5], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], s10, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[4:5]
-; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, -1, v2
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 33, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v1
+; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_sint_to_fp_i64_to_f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    v_mov_b32_e32 v2, 1
-; GFX8-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_xor_b32 s4, s2, s3
+; GFX8-NEXT:    s_cmp_gt_i32 s4, -1
+; GFX8-NEXT:    s_flbit_i32 s5, s3
+; GFX8-NEXT:    s_cselect_b32 s4, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s5, -1
+; GFX8-NEXT:    s_cselect_b32 s6, s5, s4
+; GFX8-NEXT:    s_add_i32 s4, s6, -1
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v2, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8-NEXT:    s_ashr_i32 s0, s3, 31
-; GFX8-NEXT:    s_add_u32 s2, s2, s0
+; GFX8-NEXT:    s_sub_i32 s0, 33, s6
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8-NEXT:    s_mov_b32 s1, s0
-; GFX8-NEXT:    s_addc_u32 s3, s3, s0
-; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[0:1]
-; GFX8-NEXT:    s_flbit_i32_b32 s1, s2
-; GFX8-NEXT:    s_add_i32 s1, s1, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s6, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s1, s1, s6
-; GFX8-NEXT:    s_sub_i32 s6, 0xbe, s1
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, 0
-; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s1
-; GFX8-NEXT:    s_bfe_u32 s1, s3, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s6, s6, 23
-; GFX8-NEXT:    s_or_b32 s1, s6, s1
-; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX8-NEXT:    s_and_b32 s6, s1, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
-; GFX8-NEXT:    s_cselect_b32 s2, s6, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s1, s1, s2
-; GFX8-NEXT:    s_and_b32 s0, 1, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s1
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], s0, 1
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[0:1]
+; GFX8-NEXT:    v_ldexp_f32 v2, v2, s0
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %result = sitofp i64 %in to float
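
For reference only (this is our own illustration, not part of the patch): the signed sequence checked above counts the redundant sign bits of the high word with flbit_i32/ffbh_i32, falls back to 32 or 33 based on the sign of lo^hi when the high word is all sign bits, shifts the i64 left by count-1, folds the shifted-out low word into a sticky bit OR'ed into the high word, converts the high word with the native i32-to-f32 instruction, and finally scales by 2^(33-count) with ldexp. The portable C sketch below mirrors that sequence; the name sitofp_i64_ref and the sample values are made up for illustration, and the int32_t cast assumes the usual two's-complement wrap.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch (ours, not from the patch) of the signed i64 -> f32 lowering
 * exercised by the checks above. Every bit shifted out of the high word
 * is folded into one sticky bit, so the native i32 -> f32 conversion
 * performs the final round-to-nearest-even. */
static float sitofp_i64_ref(int64_t x) {
  uint64_t u = (uint64_t)x;
  uint64_t sign = u >> 63;
  /* k = leading bits equal to the sign bit, clamped to 33; any value with
   * 33 or more redundant sign bits already fits an i32 exactly. Scanning
   * down to bit 31 reproduces ffbh_i32 on the high word together with the
   * 32/33 fallback selected from the sign of lo^hi in the tests. */
  int k = 1;
  while (k < 33 && ((u >> (63 - k)) & 1) == sign)
    ++k;
  uint64_t shifted = u << (k - 1);          /* s_lshl_b64 by k-1 */
  int32_t hi = (int32_t)(shifted >> 32);    /* assumes two's complement */
  if ((uint32_t)shifted != 0)
    hi |= 1;                                /* sticky bit from the low word */
  return ldexpf((float)hi, 33 - k);         /* v_cvt_f32_i32 + v_ldexp_f32 */
}

/* Spot-check against the host's own conversion on rounding-sensitive
 * inputs; both sides should be correctly rounded and therefore agree. */
int main(void) {
  const int64_t cases[] = {
      0, 1, -1, INT64_MAX, INT64_MIN,
      (1LL << 40) + (1LL << 16),            /* exact tie: rounds to even */
      (1LL << 40) + (1LL << 16) + 1,        /* above the tie: rounds up */
  };
  for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); ++i) {
    float r = sitofp_i64_ref(cases[i]);
    float n = (float)cases[i];
    printf("%20lld ref=%a native=%a %s\n", (long long)cases[i], r, n,
           r == n ? "ok" : "MISMATCH");
  }
  return 0;
}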
@@ -310,39 +199,22 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 ; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
 ; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 31, v4
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v0
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v4, v0, vcc
-; GFX6-NEXT:    v_and_b32_e32 v9, 1, v0
-; GFX6-NEXT:    v_xor_b32_e32 v4, v4, v0
-; GFX6-NEXT:    v_xor_b32_e32 v3, v3, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
-; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
-; GFX6-NEXT:    v_mov_b32_e32 v7, v5
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    s_mov_b32 s6, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v0, v3, v4
+; GFX6-NEXT:    v_ffbh_i32_e32 v5, v4
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, -1, v0
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v5
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, 33, v0
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v3, v0
 ; GFX6-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -350,50 +222,32 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; GFX8-NEXT:    v_mov_b32_e32 v4, 0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; GFX8-NEXT:    s_movk_i32 s4, 0xbe
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, s1
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v1, v0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v0, vcc
-; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 1, v0
-; GFX8-NEXT:    v_xor_b32_e32 v0, v5, v0
-; GFX8-NEXT:    v_ffbh_u32_e32 v5, v0
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 32, v5
-; GFX8-NEXT:    v_ffbh_u32_e32 v6, v1
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v7, v6, v5, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v7, v[0:1]
-; GFX8-NEXT:    v_sub_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v6
-; GFX8-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v0, v5
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 23, v7
-; GFX8-NEXT:    v_bfe_u32 v6, v6, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v5, v5, v6
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v6, 1, v5
-; GFX8-NEXT:    s_mov_b32 s2, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v6, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v5, v0
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
-; GFX8-NEXT:    flat_store_dword v[3:4], v0
+; GFX8-NEXT:    v_xor_b32_e32 v0, v1, v2
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX8-NEXT:    v_ffbh_i32_e32 v5, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, v0, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, -1, v5
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v0, v[1:2]
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v6, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, 33, v5
+; GFX8-NEXT:    v_ldexp_f32 v2, v6, v2
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -407,154 +261,80 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
 ; GFX6-LABEL: s_sint_to_fp_v2i64_to_v2f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_movk_i32 s16, 0xff
-; GFX6-NEXT:    s_movk_i32 s17, 0xbe
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
-; GFX6-NEXT:    s_mov_b32 s12, 1
-; GFX6-NEXT:    s_mov_b32 s13, s9
-; GFX6-NEXT:    s_mov_b32 s11, s10
-; GFX6-NEXT:    s_add_u32 s2, s2, s10
-; GFX6-NEXT:    s_addc_u32 s3, s3, s10
-; GFX6-NEXT:    s_ashr_i32 s14, s1, 31
-; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[10:11]
-; GFX6-NEXT:    s_flbit_i32_b32 s11, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s18, s3
-; GFX6-NEXT:    s_add_i32 s11, s11, 32
-; GFX6-NEXT:    s_and_b32 s10, 1, s10
-; GFX6-NEXT:    s_mov_b32 s15, s14
-; GFX6-NEXT:    v_mov_b32_e32 v0, s18
-; GFX6-NEXT:    v_mov_b32_e32 v1, s11
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GFX6-NEXT:    s_add_u32 s0, s0, s14
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s17, v2
-; GFX6-NEXT:    s_addc_u32 s1, s1, s14
-; GFX6-NEXT:    s_and_b32 s11, 1, s14
-; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    s_xor_b64 s[0:1], s[0:1], s[14:15]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    s_flbit_i32_b32 s2, s0
-; GFX6-NEXT:    s_flbit_i32_b32 s3, s1
-; GFX6-NEXT:    v_or_b32_e32 v4, v0, v1
-; GFX6-NEXT:    s_add_i32 s2, s2, 32
-; GFX6-NEXT:    v_mov_b32_e32 v0, s3
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
-; GFX6-NEXT:    v_mov_b32_e32 v5, s2
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v5, v0, v5, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v1, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[0:1], v5
-; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s17, v5
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v6, vcc
-; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX6-NEXT:    v_bfe_u32 v5, v1, 8, 23
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v6, v4
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s10, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -v1, s[0:1]
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v5
-; GFX6-NEXT:    v_and_b32_e32 v4, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[0:1]
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    s_xor_b32 s8, s6, s7
+; GFX6-NEXT:    s_flbit_i32 s10, s7
+; GFX6-NEXT:    s_xor_b32 s11, s4, s5
+; GFX6-NEXT:    s_flbit_i32 s12, s5
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[8:9], s8, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v1, s10
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[8:9], s11, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 32, 33, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v3, s12
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s10, -1
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s12, -1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, -1, v0
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 33, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, -1, v1
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 33, v1
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[6:7], v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[4:5], v3
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v3, v2
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, v1
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v0, v4
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v2, v5
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_sint_to_fp_v2i64_to_v2f32:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
-; GFX8-NEXT:    s_movk_i32 s12, 0xbe
-; GFX8-NEXT:    s_mov_b32 s8, 0
-; GFX8-NEXT:    s_movk_i32 s9, 0x80
-; GFX8-NEXT:    s_movk_i32 s14, 0xff
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_ashr_i32 s6, s3, 31
-; GFX8-NEXT:    s_add_u32 s2, s2, s6
-; GFX8-NEXT:    s_mov_b32 s7, s6
-; GFX8-NEXT:    s_addc_u32 s3, s3, s6
-; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
-; GFX8-NEXT:    s_flbit_i32_b32 s7, s2
-; GFX8-NEXT:    s_add_i32 s7, s7, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s10, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s7, s7, s10
-; GFX8-NEXT:    s_sub_i32 s10, s12, s7
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s10, s10, 0
-; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s7
-; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s10, s10, 23
-; GFX8-NEXT:    s_or_b32 s7, s10, s7
-; GFX8-NEXT:    s_mov_b32 s10, 1
-; GFX8-NEXT:    s_mov_b32 s11, s9
-; GFX8-NEXT:    v_mov_b32_e32 v0, s10
-; GFX8-NEXT:    s_and_b32 s3, s3, s14
-; GFX8-NEXT:    v_mov_b32_e32 v1, s11
-; GFX8-NEXT:    s_and_b32 s13, s7, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[8:9]
-; GFX8-NEXT:    s_cselect_b32 s2, s13, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s2, s7, s2
-; GFX8-NEXT:    s_and_b32 s3, 1, s6
-; GFX8-NEXT:    v_mov_b32_e32 v2, s2
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[2:3]
-; GFX8-NEXT:    s_ashr_i32 s2, s1, 31
-; GFX8-NEXT:    s_add_u32 s0, s0, s2
-; GFX8-NEXT:    s_mov_b32 s3, s2
-; GFX8-NEXT:    s_addc_u32 s1, s1, s2
-; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
-; GFX8-NEXT:    s_flbit_i32_b32 s3, s0
-; GFX8-NEXT:    s_add_i32 s3, s3, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s6, s1
-; GFX8-NEXT:    s_cmp_eq_u32 s1, 0
-; GFX8-NEXT:    s_cselect_b32 s3, s3, s6
-; GFX8-NEXT:    s_sub_i32 s6, s12, s3
-; GFX8-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, 0
-; GFX8-NEXT:    s_lshl_b64 s[0:1], s[0:1], s3
-; GFX8-NEXT:    s_bfe_u32 s3, s1, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s6, s6, 23
-; GFX8-NEXT:    s_or_b32 s3, s6, s3
-; GFX8-NEXT:    s_and_b32 s1, s1, s14
-; GFX8-NEXT:    s_and_b32 s6, s3, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[0:1], s[8:9]
-; GFX8-NEXT:    s_cselect_b32 s0, s6, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
-; GFX8-NEXT:    s_add_i32 s0, s3, s0
-; GFX8-NEXT:    s_and_b32 s1, 1, s2
-; GFX8-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, 1
-; GFX8-NEXT:    v_mov_b32_e32 v3, s4
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v0, -v0, s[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v4, s5
-; GFX8-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX8-NEXT:    s_xor_b32 s2, s6, s7
+; GFX8-NEXT:    s_cmp_gt_i32 s2, -1
+; GFX8-NEXT:    s_flbit_i32 s3, s7
+; GFX8-NEXT:    s_cselect_b32 s2, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s3, -1
+; GFX8-NEXT:    s_cselect_b32 s2, s3, s2
+; GFX8-NEXT:    s_add_i32 s3, s2, -1
+; GFX8-NEXT:    s_sub_i32 s8, 33, s2
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[6:7], s3
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[6:7], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    s_xor_b32 s2, s4, s5
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    s_cmp_gt_i32 s2, -1
+; GFX8-NEXT:    s_flbit_i32 s3, s5
+; GFX8-NEXT:    s_cselect_b32 s2, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s3, -1
+; GFX8-NEXT:    s_cselect_b32 s6, s3, s2
+; GFX8-NEXT:    s_add_i32 s2, s6, -1
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v2, v1
+; GFX8-NEXT:    s_sub_i32 s2, 33, s6
+; GFX8-NEXT:    v_ldexp_f32 v1, v0, s8
+; GFX8-NEXT:    v_ldexp_f32 v0, v2, s2
+; GFX8-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX8-NEXT:    s_endpgm
   %result = sitofp <2 x i64> %in to <2 x float>
   store <2 x float> %result, <2 x float> addrspace(1)* %out
@@ -564,278 +344,155 @@ define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)*
 define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
 ; GFX6-LABEL: v_sint_to_fp_v4i64_to_v4f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s6, 0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v9, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_mov_b64 s[4:5], s[10:11]
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
 ; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 4, v0
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
 ; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; GFX6-NEXT:    s_waitcnt vmcnt(1)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v12, 31, v4
-; GFX6-NEXT:    v_ashrrev_i32_e32 v13, 31, v2
+; GFX6-NEXT:    v_xor_b32_e32 v0, v3, v4
+; GFX6-NEXT:    v_ffbh_i32_e32 v9, v4
+; GFX6-NEXT:    v_xor_b32_e32 v12, v1, v2
+; GFX6-NEXT:    v_ffbh_i32_e32 v13, v2
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
-; GFX6-NEXT:    v_ashrrev_i32_e32 v15, 31, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v3, v12
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v12, vcc
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v1, v13
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v2, v13, vcc
-; GFX6-NEXT:    v_add_i32_e32 v7, vcc, v7, v14
-; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v8, v14, vcc
-; GFX6-NEXT:    v_add_i32_e32 v9, vcc, v5, v15
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, v6, v15, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v1, v3, v12
-; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GFX6-NEXT:    v_xor_b32_e32 v3, v2, v13
-; GFX6-NEXT:    v_xor_b32_e32 v2, v4, v13
-; GFX6-NEXT:    v_xor_b32_e32 v5, v8, v14
-; GFX6-NEXT:    v_xor_b32_e32 v4, v7, v14
-; GFX6-NEXT:    v_xor_b32_e32 v7, v6, v15
-; GFX6-NEXT:    v_xor_b32_e32 v6, v9, v15
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v9, v1
-; GFX6-NEXT:    v_ffbh_u32_e32 v16, v2
-; GFX6-NEXT:    v_ffbh_u32_e32 v17, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v18, v4
-; GFX6-NEXT:    v_ffbh_u32_e32 v19, v5
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v20, v9, v8, vcc
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
-; GFX6-NEXT:    v_add_i32_e32 v9, vcc, 32, v16
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, v17, v9, vcc
-; GFX6-NEXT:    v_ffbh_u32_e32 v9, v7
-; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v18
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, v19, v17, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, v9, v8, vcc
-; GFX6-NEXT:    v_lshl_b64 v[8:9], v[2:3], v16
-; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s4, v16
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX6-NEXT:    v_lshl_b64 v[2:3], v[4:5], v17
-; GFX6-NEXT:    v_sub_i32_e64 v17, s[0:1], s4, v17
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
-; GFX6-NEXT:    v_lshl_b64 v[4:5], v[6:7], v18
-; GFX6-NEXT:    v_sub_i32_e64 v18, s[2:3], s4, v18
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
-; GFX6-NEXT:    v_lshl_b64 v[6:7], v[0:1], v20
-; GFX6-NEXT:    v_sub_i32_e64 v19, s[4:5], s4, v20
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[4:5], 0, v[0:1]
-; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v7
-; GFX6-NEXT:    v_mov_b32_e32 v0, v6
-; GFX6-NEXT:    v_bfe_u32 v20, v7, 8, 23
-; GFX6-NEXT:    v_mov_b32_e32 v21, 0xff
-; GFX6-NEXT:    v_and_b32_e32 v7, v21, v9
-; GFX6-NEXT:    v_mov_b32_e32 v6, v8
-; GFX6-NEXT:    v_bfe_u32 v22, v9, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v9, v21, v3
-; GFX6-NEXT:    v_mov_b32_e32 v8, v2
-; GFX6-NEXT:    v_bfe_u32 v23, v3, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v3, v21, v5
-; GFX6-NEXT:    v_mov_b32_e32 v2, v4
-; GFX6-NEXT:    v_bfe_u32 v4, v5, 8, 23
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, v19, s[4:5]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v5, 23, v5
-; GFX6-NEXT:    v_or_b32_e32 v5, v5, v20
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
-; GFX6-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
-; GFX6-NEXT:    v_or_b32_e32 v16, v16, v22
-; GFX6-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
-; GFX6-NEXT:    s_mov_b32 s4, 1
-; GFX6-NEXT:    s_mov_b32 s5, s7
-; GFX6-NEXT:    v_and_b32_e32 v12, 1, v12
-; GFX6-NEXT:    v_and_b32_e32 v13, 1, v13
-; GFX6-NEXT:    v_and_b32_e32 v14, 1, v14
-; GFX6-NEXT:    v_and_b32_e32 v15, 1, v15
-; GFX6-NEXT:    v_cndmask_b32_e64 v17, 0, v17, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v18, 0, v18, s[2:3]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
-; GFX6-NEXT:    v_lshlrev_b32_e32 v18, 23, v18
-; GFX6-NEXT:    v_or_b32_e32 v17, v17, v23
-; GFX6-NEXT:    v_or_b32_e32 v4, v18, v4
-; GFX6-NEXT:    v_and_b32_e32 v18, 1, v5
-; GFX6-NEXT:    v_and_b32_e32 v19, 1, v16
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX6-NEXT:    v_and_b32_e32 v0, 1, v17
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, 1, v18, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v19, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v18
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v16, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v17, v0
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v1
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, v2, -v2, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
-; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, -v5, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v0, -v0, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v4, -v4, vcc
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[10:11], s[8:11], 0 addr64
+; GFX6-NEXT:    v_xor_b32_e32 v14, v7, v8
+; GFX6-NEXT:    v_ffbh_i32_e32 v15, v8
+; GFX6-NEXT:    v_xor_b32_e32 v16, v5, v6
+; GFX6-NEXT:    v_ffbh_i32_e32 v17, v6
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v12
+; GFX6-NEXT:    v_cndmask_b32_e64 v12, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v14
+; GFX6-NEXT:    v_cndmask_b32_e64 v14, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v16
+; GFX6-NEXT:    v_cndmask_b32_e64 v16, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v9
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v13
+; GFX6-NEXT:    v_cndmask_b32_e32 v9, v12, v13, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v15
+; GFX6-NEXT:    v_cndmask_b32_e32 v12, v14, v15, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v17
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, v16, v17, vcc
+; GFX6-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GFX6-NEXT:    v_sub_i32_e32 v15, vcc, 33, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, -1, v9
+; GFX6-NEXT:    v_sub_i32_e32 v9, vcc, 33, v9
+; GFX6-NEXT:    v_add_i32_e32 v16, vcc, -1, v12
+; GFX6-NEXT:    v_sub_i32_e32 v12, vcc, 33, v12
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, -1, v13
+; GFX6-NEXT:    v_sub_i32_e32 v13, vcc, 33, v13
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v14
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[1:2], v0
+; GFX6-NEXT:    v_lshl_b64 v[7:8], v[7:8], v16
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[5:6], v17
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v8, v3
+; GFX6-NEXT:    v_or_b32_e32 v3, v6, v5
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, v3
+; GFX6-NEXT:    v_ldexp_f32_e32 v3, v2, v15
+; GFX6-NEXT:    v_ldexp_f32_e32 v2, v0, v9
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v12
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v4, v13
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[10:11], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: v_sint_to_fp_v4i64_to_v4f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
-; GFX8-NEXT:    s_movk_i32 s8, 0xbe
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s2, v1
-; GFX8-NEXT:    v_mov_b32_e32 v3, s3
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v3, v2, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s0, v0
-; GFX8-NEXT:    v_addc_u32_e32 v9, vcc, v1, v2, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 16, v4
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GFX8-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; GFX8-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    s_mov_b32 s6, 1
-; GFX8-NEXT:    s_mov_b32 s7, s5
-; GFX8-NEXT:    v_mov_b32_e32 v12, 0xff
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v10, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
+; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v11, s1
+; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, v11, v10, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
+; GFX8-NEXT:    v_xor_b32_e32 v14, v3, v4
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v7
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v10
-; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v5
-; GFX8-NEXT:    v_addc_u32_e32 v7, vcc, v7, v10, vcc
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v11
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v5, v11, vcc
-; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v2, v13
-; GFX8-NEXT:    v_xor_b32_e32 v2, v6, v10
-; GFX8-NEXT:    v_xor_b32_e32 v4, v4, v11
-; GFX8-NEXT:    v_addc_u32_e32 v17, vcc, v3, v13, vcc
-; GFX8-NEXT:    v_xor_b32_e32 v3, v7, v10
-; GFX8-NEXT:    v_and_b32_e32 v14, 1, v10
-; GFX8-NEXT:    v_ffbh_u32_e32 v10, v2
-; GFX8-NEXT:    v_xor_b32_e32 v6, v16, v13
-; GFX8-NEXT:    v_ffbh_u32_e32 v16, v4
-; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v10
-; GFX8-NEXT:    v_add_u32_e32 v16, vcc, 32, v16
-; GFX8-NEXT:    v_and_b32_e32 v15, 1, v11
-; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v11
-; GFX8-NEXT:    v_ffbh_u32_e32 v11, v3
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v18, v11, v10, vcc
-; GFX8-NEXT:    v_xor_b32_e32 v7, v17, v13
-; GFX8-NEXT:    v_ffbh_u32_e32 v17, v5
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v16, vcc
-; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s8, v18
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX8-NEXT:    v_lshlrev_b64 v[10:11], v18, v[2:3]
-; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v2, v10
-; GFX8-NEXT:    v_and_b32_e32 v3, 0xff, v11
-; GFX8-NEXT:    v_bfe_u32 v10, v11, 8, 23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
-; GFX8-NEXT:    v_or_b32_e32 v10, v17, v10
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
-; GFX8-NEXT:    v_and_b32_e32 v17, 1, v10
-; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[2:3]
-; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v16, v[4:5]
-; GFX8-NEXT:    v_sub_u32_e64 v16, s[0:1], s8, v16
-; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
-; GFX8-NEXT:    v_mov_b32_e32 v4, v2
-; GFX8-NEXT:    v_and_b32_e32 v5, v12, v3
-; GFX8-NEXT:    v_bfe_u32 v2, v3, 8, 23
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v16, s[0:1]
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v3
-; GFX8-NEXT:    v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[4:5]
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v16, 1, v17, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v10, v16
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v3
-; GFX8-NEXT:    v_ffbh_u32_e32 v11, v6
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v4, -v4, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v18, v7
-; GFX8-NEXT:    v_add_u32_e64 v11, s[2:3], 32, v11
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX8-NEXT:    v_cndmask_b32_e32 v14, v18, v11, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v14, v[6:7]
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v10, v4
-; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s8, v14
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GFX8-NEXT:    v_and_b32_e32 v11, v12, v5
-; GFX8-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 23, v4
-; GFX8-NEXT:    v_bfe_u32 v5, v5, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v4, v4, v5
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
-; GFX8-NEXT:    v_and_b32_e32 v5, 1, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, 1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
-; GFX8-NEXT:    v_and_b32_e32 v5, 1, v13
-; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v1
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v13
-; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v13
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v13, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v6, v0
-; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 32, v6
-; GFX8-NEXT:    v_ffbh_u32_e32 v7, v1
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v14, v7, v6, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v5
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v4, -v4, vcc
-; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s8, v14
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX8-NEXT:    v_lshlrev_b64 v[6:7], v14, v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX8-NEXT:    v_and_b32_e32 v11, v12, v7
-; GFX8-NEXT:    v_mov_b32_e32 v10, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX8-NEXT:    v_bfe_u32 v1, v7, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
-; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
-; GFX8-NEXT:    v_and_b32_e32 v1, 1, v13
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v0, -v0, vcc
-; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
+; GFX8-NEXT:    v_xor_b32_e32 v0, v7, v8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v12, v5, v6
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v12
+; GFX8-NEXT:    v_cndmask_b32_e64 v12, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v14
+; GFX8-NEXT:    v_xor_b32_e32 v16, v1, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v14, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v16
+; GFX8-NEXT:    v_ffbh_i32_e32 v11, v8
+; GFX8-NEXT:    v_cndmask_b32_e64 v16, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v11
+; GFX8-NEXT:    v_ffbh_i32_e32 v13, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v11, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v13
+; GFX8-NEXT:    v_ffbh_i32_e32 v15, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v11, v12, v13, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v15
+; GFX8-NEXT:    v_ffbh_i32_e32 v17, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v12, v14, v15, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v13, v16, v17, vcc
+; GFX8-NEXT:    v_add_u32_e32 v14, vcc, -1, v0
+; GFX8-NEXT:    v_sub_u32_e32 v15, vcc, 33, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, -1, v11
+; GFX8-NEXT:    v_lshlrev_b64 v[7:8], v14, v[7:8]
+; GFX8-NEXT:    v_add_u32_e32 v16, vcc, -1, v12
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, -1, v13
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v0, v[5:6]
+; GFX8-NEXT:    v_sub_u32_e32 v11, vcc, 33, v11
+; GFX8-NEXT:    v_sub_u32_e32 v12, vcc, 33, v12
+; GFX8-NEXT:    v_sub_u32_e32 v13, vcc, 33, v13
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v16, v[3:4]
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v17, v[1:2]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v2, v8, v2
+; GFX8-NEXT:    v_or_b32_e32 v5, v6, v5
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v2
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v2, v5
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v4, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v15
+; GFX8-NEXT:    v_ldexp_f32 v3, v3, v12
+; GFX8-NEXT:    v_ldexp_f32 v0, v2, v11
+; GFX8-NEXT:    v_ldexp_f32 v2, v4, v13
+; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[0:3]
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
@@ -849,159 +506,84 @@ define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)*
 define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
 ; GFX6-LABEL: s_sint_to_fp_v2i64_to_v2f16:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_movk_i32 s16, 0xff
-; GFX6-NEXT:    s_movk_i32 s17, 0xbe
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
-; GFX6-NEXT:    s_mov_b32 s12, 1
-; GFX6-NEXT:    s_mov_b32 s13, s9
-; GFX6-NEXT:    s_mov_b32 s11, s10
-; GFX6-NEXT:    s_add_u32 s2, s2, s10
-; GFX6-NEXT:    s_addc_u32 s3, s3, s10
-; GFX6-NEXT:    s_ashr_i32 s14, s1, 31
-; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[10:11]
-; GFX6-NEXT:    s_flbit_i32_b32 s11, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s18, s3
-; GFX6-NEXT:    s_add_i32 s11, s11, 32
-; GFX6-NEXT:    s_and_b32 s10, 1, s10
-; GFX6-NEXT:    s_mov_b32 s15, s14
-; GFX6-NEXT:    v_mov_b32_e32 v0, s18
-; GFX6-NEXT:    v_mov_b32_e32 v1, s11
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GFX6-NEXT:    s_add_u32 s0, s0, s14
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s17, v2
-; GFX6-NEXT:    s_addc_u32 s1, s1, s14
-; GFX6-NEXT:    s_and_b32 s11, 1, s14
-; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    s_xor_b64 s[0:1], s[0:1], s[14:15]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    s_flbit_i32_b32 s2, s0
-; GFX6-NEXT:    s_flbit_i32_b32 s3, s1
-; GFX6-NEXT:    v_or_b32_e32 v4, v0, v1
-; GFX6-NEXT:    s_add_i32 s2, s2, 32
-; GFX6-NEXT:    v_mov_b32_e32 v0, s3
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
-; GFX6-NEXT:    v_mov_b32_e32 v5, s2
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v5, v0, v5, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v1, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[0:1], v5
-; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s17, v5
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v6, vcc
-; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v5, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s10, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v4, -v4, s[0:1]
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v4
-; GFX6-NEXT:    v_and_b32_e32 v4, 1, v0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, 1
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[0:1]
+; GFX6-NEXT:    s_xor_b32 s8, s6, s7
+; GFX6-NEXT:    s_flbit_i32 s10, s7
+; GFX6-NEXT:    s_xor_b32 s11, s4, s5
+; GFX6-NEXT:    s_flbit_i32 s12, s5
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[8:9], s8, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v1, s10
+; GFX6-NEXT:    v_cmp_gt_i32_e64 s[8:9], s11, -1
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 32, 33, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v3, s12
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s10, -1
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s12, -1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, -1, v0
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 33, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, -1, v1
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 33, v1
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[6:7], v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[4:5], v3
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v3, v2
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v4
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v5
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_sint_to_fp_v2i64_to_v2f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
-; GFX8-NEXT:    s_movk_i32 s12, 0xbe
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_movk_i32 s14, 0xff
-; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_ashr_i32 s8, s7, 31
-; GFX8-NEXT:    s_add_u32 s6, s6, s8
-; GFX8-NEXT:    s_mov_b32 s9, s8
-; GFX8-NEXT:    s_addc_u32 s7, s7, s8
-; GFX8-NEXT:    s_xor_b64 s[6:7], s[6:7], s[8:9]
-; GFX8-NEXT:    s_flbit_i32_b32 s9, s6
-; GFX8-NEXT:    s_add_i32 s9, s9, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s10, s7
-; GFX8-NEXT:    s_cmp_eq_u32 s7, 0
-; GFX8-NEXT:    s_cselect_b32 s9, s9, s10
-; GFX8-NEXT:    s_sub_i32 s10, s12, s9
-; GFX8-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX8-NEXT:    s_cselect_b32 s10, s10, 0
-; GFX8-NEXT:    s_lshl_b64 s[6:7], s[6:7], s9
-; GFX8-NEXT:    s_bfe_u32 s9, s7, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s10, s10, 23
-; GFX8-NEXT:    s_or_b32 s9, s10, s9
-; GFX8-NEXT:    s_mov_b32 s10, 1
-; GFX8-NEXT:    s_mov_b32 s11, s3
-; GFX8-NEXT:    v_mov_b32_e32 v0, s10
-; GFX8-NEXT:    s_and_b32 s7, s7, s14
-; GFX8-NEXT:    v_mov_b32_e32 v1, s11
-; GFX8-NEXT:    s_and_b32 s13, s9, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[6:7], s[2:3]
-; GFX8-NEXT:    s_cselect_b32 s6, s13, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, 1
-; GFX8-NEXT:    s_add_i32 s6, s9, s6
-; GFX8-NEXT:    s_and_b32 s7, 1, s8
-; GFX8-NEXT:    v_mov_b32_e32 v2, s6
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[6:7], s7, 1
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[6:7]
-; GFX8-NEXT:    s_ashr_i32 s6, s5, 31
-; GFX8-NEXT:    s_add_u32 s4, s4, s6
-; GFX8-NEXT:    s_mov_b32 s7, s6
-; GFX8-NEXT:    s_addc_u32 s5, s5, s6
-; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
-; GFX8-NEXT:    s_flbit_i32_b32 s7, s4
-; GFX8-NEXT:    s_add_i32 s7, s7, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s8, s5
-; GFX8-NEXT:    s_cmp_eq_u32 s5, 0
-; GFX8-NEXT:    s_cselect_b32 s7, s7, s8
-; GFX8-NEXT:    s_sub_i32 s8, s12, s7
-; GFX8-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX8-NEXT:    s_cselect_b32 s8, s8, 0
-; GFX8-NEXT:    s_lshl_b64 s[4:5], s[4:5], s7
-; GFX8-NEXT:    s_bfe_u32 s7, s5, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s8, s8, 23
-; GFX8-NEXT:    s_or_b32 s7, s8, s7
-; GFX8-NEXT:    s_and_b32 s5, s5, s14
-; GFX8-NEXT:    s_and_b32 s8, s7, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[4:5], s[2:3]
-; GFX8-NEXT:    s_cselect_b32 s2, s8, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s2, s7, s2
-; GFX8-NEXT:    s_and_b32 s3, 1, s6
-; GFX8-NEXT:    v_mov_b32_e32 v0, s2
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[2:3]
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX8-NEXT:    v_or_b32_e32 v2, v0, v2
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_xor_b32 s2, s6, s7
+; GFX8-NEXT:    s_cmp_gt_i32 s2, -1
+; GFX8-NEXT:    s_flbit_i32 s3, s7
+; GFX8-NEXT:    s_cselect_b32 s2, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s3, -1
+; GFX8-NEXT:    s_cselect_b32 s2, s3, s2
+; GFX8-NEXT:    s_add_i32 s3, s2, -1
+; GFX8-NEXT:    s_sub_i32 s8, 33, s2
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[6:7], s3
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[6:7], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    s_xor_b32 s2, s4, s5
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    s_cmp_gt_i32 s2, -1
+; GFX8-NEXT:    s_flbit_i32 s3, s5
+; GFX8-NEXT:    s_cselect_b32 s2, 33, 32
+; GFX8-NEXT:    s_cmp_lg_u32 s3, -1
+; GFX8-NEXT:    s_cselect_b32 s6, s3, s2
+; GFX8-NEXT:    s_add_i32 s2, s6, -1
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX8-NEXT:    s_sub_i32 s2, 33, s6
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, s8
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, s2
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX8-NEXT:    v_or_b32_e32 v2, v1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
@@ -1014,291 +596,168 @@ define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)*
 define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
 ; GFX6-LABEL: v_sint_to_fp_v4i64_to_v4f16:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s6, 0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v9, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_mov_b64 s[4:5], s[10:11]
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
 ; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 3, v0
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
 ; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; GFX6-NEXT:    s_waitcnt vmcnt(1)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v12, 31, v4
-; GFX6-NEXT:    v_ashrrev_i32_e32 v13, 31, v2
+; GFX6-NEXT:    v_xor_b32_e32 v0, v3, v4
+; GFX6-NEXT:    v_ffbh_i32_e32 v9, v4
+; GFX6-NEXT:    v_xor_b32_e32 v12, v1, v2
+; GFX6-NEXT:    v_ffbh_i32_e32 v13, v2
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
-; GFX6-NEXT:    v_ashrrev_i32_e32 v15, 31, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v3, v12
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v12, vcc
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v1, v13
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v2, v13, vcc
-; GFX6-NEXT:    v_add_i32_e32 v7, vcc, v7, v14
-; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v8, v14, vcc
-; GFX6-NEXT:    v_add_i32_e32 v9, vcc, v5, v15
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, v6, v15, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v1, v3, v12
-; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GFX6-NEXT:    v_xor_b32_e32 v3, v2, v13
-; GFX6-NEXT:    v_xor_b32_e32 v2, v4, v13
-; GFX6-NEXT:    v_xor_b32_e32 v5, v8, v14
-; GFX6-NEXT:    v_xor_b32_e32 v4, v7, v14
-; GFX6-NEXT:    v_xor_b32_e32 v7, v6, v15
-; GFX6-NEXT:    v_xor_b32_e32 v6, v9, v15
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v9, v1
-; GFX6-NEXT:    v_ffbh_u32_e32 v16, v2
-; GFX6-NEXT:    v_ffbh_u32_e32 v17, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v18, v4
-; GFX6-NEXT:    v_ffbh_u32_e32 v19, v5
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v20, v9, v8, vcc
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
-; GFX6-NEXT:    v_add_i32_e32 v9, vcc, 32, v16
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, v17, v9, vcc
-; GFX6-NEXT:    v_ffbh_u32_e32 v9, v7
-; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v18
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, v19, v17, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, v9, v8, vcc
-; GFX6-NEXT:    v_lshl_b64 v[8:9], v[2:3], v16
-; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s4, v16
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX6-NEXT:    v_lshl_b64 v[2:3], v[4:5], v17
-; GFX6-NEXT:    v_sub_i32_e64 v17, s[0:1], s4, v17
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
-; GFX6-NEXT:    v_lshl_b64 v[4:5], v[6:7], v18
-; GFX6-NEXT:    v_sub_i32_e64 v18, s[2:3], s4, v18
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
-; GFX6-NEXT:    v_lshl_b64 v[6:7], v[0:1], v20
-; GFX6-NEXT:    v_sub_i32_e64 v19, s[4:5], s4, v20
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[4:5], 0, v[0:1]
-; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v7
-; GFX6-NEXT:    v_mov_b32_e32 v0, v6
-; GFX6-NEXT:    v_bfe_u32 v20, v7, 8, 23
-; GFX6-NEXT:    v_mov_b32_e32 v21, 0xff
-; GFX6-NEXT:    v_and_b32_e32 v7, v21, v9
-; GFX6-NEXT:    v_mov_b32_e32 v6, v8
-; GFX6-NEXT:    v_bfe_u32 v22, v9, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v9, v21, v3
-; GFX6-NEXT:    v_mov_b32_e32 v8, v2
-; GFX6-NEXT:    v_bfe_u32 v23, v3, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v3, v21, v5
-; GFX6-NEXT:    v_mov_b32_e32 v2, v4
-; GFX6-NEXT:    v_bfe_u32 v4, v5, 8, 23
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, v19, s[4:5]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v5, 23, v5
-; GFX6-NEXT:    v_or_b32_e32 v5, v5, v20
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
-; GFX6-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
-; GFX6-NEXT:    v_or_b32_e32 v16, v16, v22
-; GFX6-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
-; GFX6-NEXT:    s_mov_b32 s4, 1
-; GFX6-NEXT:    s_mov_b32 s5, s7
-; GFX6-NEXT:    v_and_b32_e32 v12, 1, v12
-; GFX6-NEXT:    v_and_b32_e32 v13, 1, v13
-; GFX6-NEXT:    v_and_b32_e32 v14, 1, v14
-; GFX6-NEXT:    v_and_b32_e32 v15, 1, v15
-; GFX6-NEXT:    v_cndmask_b32_e64 v17, 0, v17, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v18, 0, v18, s[2:3]
-; GFX6-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
-; GFX6-NEXT:    v_lshlrev_b32_e32 v18, 23, v18
-; GFX6-NEXT:    v_or_b32_e32 v17, v17, v23
-; GFX6-NEXT:    v_or_b32_e32 v4, v18, v4
-; GFX6-NEXT:    v_and_b32_e32 v18, 1, v5
-; GFX6-NEXT:    v_and_b32_e32 v19, 1, v16
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX6-NEXT:    v_and_b32_e32 v0, 1, v17
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v18, 1, v18, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v19, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v18
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v16, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v17, v0
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
-; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, -v3, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -v1, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v14, v7, v8
+; GFX6-NEXT:    v_ffbh_i32_e32 v15, v8
+; GFX6-NEXT:    v_xor_b32_e32 v16, v5, v6
+; GFX6-NEXT:    v_ffbh_i32_e32 v17, v6
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v12
+; GFX6-NEXT:    v_cndmask_b32_e64 v12, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v14
+; GFX6-NEXT:    v_cndmask_b32_e64 v14, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v16
+; GFX6-NEXT:    v_cndmask_b32_e64 v16, 32, 33, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v9
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v13
+; GFX6-NEXT:    v_cndmask_b32_e32 v9, v12, v13, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v15
+; GFX6-NEXT:    v_cndmask_b32_e32 v12, v14, v15, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v17
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, v16, v17, vcc
+; GFX6-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GFX6-NEXT:    v_sub_i32_e32 v15, vcc, 33, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, -1, v9
+; GFX6-NEXT:    v_sub_i32_e32 v9, vcc, 33, v9
+; GFX6-NEXT:    v_add_i32_e32 v16, vcc, -1, v12
+; GFX6-NEXT:    v_sub_i32_e32 v12, vcc, 33, v12
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, -1, v13
+; GFX6-NEXT:    v_sub_i32_e32 v13, vcc, 33, v13
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v14
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[1:2], v0
+; GFX6-NEXT:    v_lshl_b64 v[7:8], v[7:8], v16
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[5:6], v17
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v8, v3
+; GFX6-NEXT:    v_or_b32_e32 v3, v6, v5
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX6-NEXT:    v_ldexp_f32_e32 v2, v2, v15
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v9
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v12
+; GFX6-NEXT:    v_ldexp_f32_e32 v3, v3, v13
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v3, v3
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v4, v1
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX6-NEXT:    v_or_b32_e32 v1, v3, v1
-; GFX6-NEXT:    v_or_b32_e32 v0, v4, v0
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[10:11], s[8:11], 0 addr64
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v0, v2
+; GFX6-NEXT:    v_or_b32_e32 v0, v3, v4
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[10:11], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: v_sint_to_fp_v4i64_to_v4f16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
-; GFX8-NEXT:    v_mov_b32_e32 v11, 0
-; GFX8-NEXT:    s_movk_i32 s8, 0xbe
-; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    v_mov_b32_e32 v9, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v10, 3, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s6, v1
-; GFX8-NEXT:    v_mov_b32_e32 v2, s7
-; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v11, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v9, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
 ; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
 ; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
-; GFX8-NEXT:    v_mov_b32_e32 v12, 0xff
-; GFX8-NEXT:    v_lshlrev_b32_e32 v19, 3, v0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_mov_b32 s6, 1
-; GFX8-NEXT:    s_mov_b32 s7, s3
+; GFX8-NEXT:    v_mov_b32_e32 v11, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v4
+; GFX8-NEXT:    v_xor_b32_e32 v15, v3, v4
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v8
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v13
-; GFX8-NEXT:    v_ashrrev_i32_e32 v14, 31, v6
-; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, v8, v13, vcc
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v14
-; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v6, v14, vcc
-; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v3, v15
-; GFX8-NEXT:    v_xor_b32_e32 v3, v7, v13
-; GFX8-NEXT:    v_xor_b32_e32 v7, v9, v15
-; GFX8-NEXT:    v_ffbh_u32_e32 v9, v3
-; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, v4, v15, vcc
-; GFX8-NEXT:    v_xor_b32_e32 v4, v8, v13
-; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v14
-; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 32, v9
-; GFX8-NEXT:    v_xor_b32_e32 v8, v10, v15
-; GFX8-NEXT:    v_ffbh_u32_e32 v10, v4
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX8-NEXT:    v_ffbh_u32_e32 v16, v5
-; GFX8-NEXT:    v_cndmask_b32_e32 v18, v10, v9, vcc
-; GFX8-NEXT:    v_xor_b32_e32 v6, v6, v14
-; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v16
-; GFX8-NEXT:    v_ffbh_u32_e32 v17, v6
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
-; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v10, vcc
-; GFX8-NEXT:    v_sub_u32_e32 v16, vcc, s8, v10
-; GFX8-NEXT:    v_ffbh_u32_e32 v9, v7
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[5:6]
-; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v10, v[5:6]
-; GFX8-NEXT:    v_add_u32_e64 v9, s[0:1], 32, v9
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v16, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v10, v8
-; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v8
-; GFX8-NEXT:    v_cndmask_b32_e64 v17, v10, v9, s[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v9, v5
-; GFX8-NEXT:    v_and_b32_e32 v10, v12, v6
-; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v5
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[9:10]
-; GFX8-NEXT:    v_and_b32_e32 v5, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v16, 0, v5, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[9:10]
-; GFX8-NEXT:    v_sub_u32_e64 v9, s[0:1], s8, v18
-; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[3:4]
-; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v18, v[3:4]
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v3, v5
-; GFX8-NEXT:    v_and_b32_e32 v4, 0xff, v6
-; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v9, 23, v9
-; GFX8-NEXT:    v_or_b32_e32 v5, v9, v5
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[3:4]
-; GFX8-NEXT:    v_and_b32_e32 v9, 1, v5
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s[0:1]
-; GFX8-NEXT:    v_cmp_gt_u64_e64 s[0:1], s[6:7], v[3:4]
-; GFX8-NEXT:    v_and_b32_e32 v6, 1, v13
-; GFX8-NEXT:    v_and_b32_e32 v13, 1, v14
-; GFX8-NEXT:    v_cndmask_b32_e32 v14, 1, v16, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v9, 1, v9, s[0:1]
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v9
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v14
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v6
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v5, -v5, vcc
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v13, v0
-; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s8, v17
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
-; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v17, v[7:8]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_mov_b32_e32 v5, v3
-; GFX8-NEXT:    v_and_b32_e32 v6, v12, v4
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX8-NEXT:    v_bfe_u32 v3, v4, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[5:6]
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[5:6]
-; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v3
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v15
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v3
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v0, -v0, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v1, v6
-; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v6
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v6, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v2, v0
-; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v6
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
-; GFX8-NEXT:    v_ffbh_u32_e32 v3, v1
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v7, v3, v2, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v7, v[0:1]
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_mov_b32_e32 v4, v2
-; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, s8, v7
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v5, v12, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX8-NEXT:    v_bfe_u32 v1, v3, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[4:5]
-; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[4:5]
-; GFX8-NEXT:    v_mov_b32_e32 v10, s5
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
-; GFX8-NEXT:    v_and_b32_e32 v1, 1, v6
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v0
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v19
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v10, v11, vcc
-; GFX8-NEXT:    v_or_b32_e32 v2, v13, v9
-; GFX8-NEXT:    v_or_b32_e32 v3, v3, v8
+; GFX8-NEXT:    v_xor_b32_e32 v0, v7, v8
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v13, v5, v6
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v13
+; GFX8-NEXT:    v_cndmask_b32_e64 v13, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v15
+; GFX8-NEXT:    v_xor_b32_e32 v17, v1, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v15, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_lt_i32_e32 vcc, -1, v17
+; GFX8-NEXT:    v_ffbh_i32_e32 v12, v8
+; GFX8-NEXT:    v_cndmask_b32_e64 v17, 32, 33, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v12
+; GFX8-NEXT:    v_ffbh_i32_e32 v14, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v14
+; GFX8-NEXT:    v_ffbh_i32_e32 v16, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v12, v13, v14, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v16
+; GFX8-NEXT:    v_ffbh_i32_e32 v18, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v13, v15, v16, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v18
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, v17, v18, vcc
+; GFX8-NEXT:    v_add_u32_e32 v15, vcc, -1, v0
+; GFX8-NEXT:    v_sub_u32_e32 v16, vcc, 33, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, -1, v12
+; GFX8-NEXT:    v_lshlrev_b64 v[7:8], v15, v[7:8]
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, -1, v13
+; GFX8-NEXT:    v_add_u32_e32 v18, vcc, -1, v14
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v0, v[5:6]
+; GFX8-NEXT:    v_sub_u32_e32 v12, vcc, 33, v12
+; GFX8-NEXT:    v_sub_u32_e32 v13, vcc, 33, v13
+; GFX8-NEXT:    v_sub_u32_e32 v14, vcc, 33, v14
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v17, v[3:4]
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v18, v[1:2]
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v2, v8, v2
+; GFX8-NEXT:    v_or_b32_e32 v5, v6, v5
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v1, v2
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v2, v5
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v16
+; GFX8-NEXT:    v_ldexp_f32 v2, v2, v12
+; GFX8-NEXT:    v_ldexp_f32 v3, v3, v13
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, v14
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v5, v0
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v3, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v10
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v11, v9, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v4
 ; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()

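(For orientation: each function checked in the following file boils down, in IR, to a single uitofp instruction. A minimal standalone sketch, with an illustrative function name and without the per-thread buffer indexing via llvm.amdgcn.workitem.id.x() that the real tests perform:

  define float @uitofp_i64_to_f32(i64 %x) {
    %conv = uitofp i64 %x to float   ; the conversion under test
    ret float %conv
  }

Compiled with llc for an amdgcn triple, this now selects the v_ffbh_u32/v_lshl_b64/v_or_b32 normalization followed by v_cvt_f32_u32 and v_ldexp_f32 seen in the updated checks, in place of the old manual exponent and mantissa construction.)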
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
index 45ebeceb1f149..7abd17ab49f04 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
@@ -9,35 +9,21 @@ define amdgpu_kernel void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_flbit_i32_b32 s4, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s10, s3
-; GFX6-NEXT:    s_add_i32 s11, s4, 32
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_flbit_i32_b32 s8, s3
+; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_mov_b32 s4, s0
 ; GFX6-NEXT:    s_mov_b32 s5, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s10
-; GFX6-NEXT:    v_mov_b32_e32 v1, s11
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 32, v0, vcc
 ; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 32, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v1
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
@@ -45,34 +31,20 @@ define amdgpu_kernel void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX8-LABEL: s_uint_to_fp_i64_to_f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    v_mov_b32_e32 v0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_flbit_i32_b32 s6, s2
-; GFX8-NEXT:    s_add_i32 s6, s6, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s7, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, s7
-; GFX8-NEXT:    s_sub_i32 s7, 0xbe, s6
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s7, s7, 0
+; GFX8-NEXT:    s_flbit_i32_b32 s4, s3
+; GFX8-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s4, 32
 ; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
-; GFX8-NEXT:    s_lshl_b32 s6, s7, 23
-; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
-; GFX8-NEXT:    s_or_b32 s6, s6, s7
-; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    s_and_b32 s7, s6, 1
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
-; GFX8-NEXT:    s_cselect_b32 s2, s7, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s6, s6, s2
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, s6
-; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX8-NEXT:    s_sub_i32 s2, 32, s6
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, s2
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    flat_store_short v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %result = uitofp i64 %in to half
@@ -88,36 +60,22 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX6-NEXT:    s_mov_b32 s6, 0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v2, 0
-; GFX6-NEXT:    s_mov_b32 s8, 1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
 ; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
-; GFX6-NEXT:    s_mov_b32 s9, s7
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
-; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
-; GFX6-NEXT:    v_mov_b32_e32 v7, v5
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v4
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v3, v0
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX6-NEXT:    buffer_store_short v0, v[1:2], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
@@ -126,44 +84,29 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, 0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 1, v0
-; GFX8-NEXT:    s_movk_i32 s6, 0xbe
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v5, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_mov_b32 s4, 1
-; GFX8-NEXT:    s_mov_b32 s5, s3
-; GFX8-NEXT:    v_mov_b32_e32 v7, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ffbh_u32_e32 v0, v1
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
-; GFX8-NEXT:    v_ffbh_u32_e32 v3, v2
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v0, v[1:2]
-; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s6, v0
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v0, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
-; GFX8-NEXT:    v_bfe_u32 v3, v4, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v3, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v6
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v7, v5, vcc
-; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    v_ffbh_u32_e32 v4, v2
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v4, 32, v4, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[1:2], v4, v[1:2]
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, 32, v4
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v4
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v4, v1
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; GFX8-NEXT:    flat_store_short v[0:1], v4
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -179,69 +122,40 @@ define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_flbit_i32_b32 s4, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s10, s3
-; GFX6-NEXT:    s_add_i32 s11, s4, 32
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_flbit_i32_b32 s8, s3
+; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_mov_b32 s4, s0
 ; GFX6-NEXT:    s_mov_b32 s5, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s10
-; GFX6-NEXT:    v_mov_b32_e32 v1, s11
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 32, v0, vcc
 ; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, 32, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v1
 ; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_uint_to_fp_i64_to_f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    v_mov_b32_e32 v2, 1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8-NEXT:    s_flbit_i32_b32 s0, s2
-; GFX8-NEXT:    s_add_i32 s5, s0, 32
 ; GFX8-NEXT:    s_flbit_i32_b32 s4, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s4, s5, s4
-; GFX8-NEXT:    s_sub_i32 s5, 0xbe, s4
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s5, s5, 0
-; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
+; GFX8-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s4, 32
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    s_sub_i32 s0, 32, s6
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8-NEXT:    s_mov_b32 s0, 0
-; GFX8-NEXT:    s_movk_i32 s1, 0x80
-; GFX8-NEXT:    s_bfe_u32 s4, s3, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s5, s5, 23
-; GFX8-NEXT:    s_or_b32 s4, s5, s4
-; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX8-NEXT:    v_mov_b32_e32 v3, s1
-; GFX8-NEXT:    s_and_b32 s5, s4, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[0:1]
-; GFX8-NEXT:    s_cselect_b32 s0, s5, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
-; GFX8-NEXT:    s_add_i32 s0, s4, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    v_ldexp_f32 v2, v2, s0
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %result = uitofp i64 %in to float
@@ -261,31 +175,18 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 ; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; GFX6-NEXT:    s_movk_i32 s4, 0xbe
 ; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
-; GFX6-NEXT:    s_movk_i32 s7, 0x80
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
-; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
-; GFX6-NEXT:    v_mov_b32_e32 v7, v5
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    s_mov_b32 s6, 1
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v4
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v0
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v3, v0
 ; GFX6-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -293,42 +194,28 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; GFX8-NEXT:    v_mov_b32_e32 v4, 0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; GFX8-NEXT:    s_movk_i32 s4, 0xbe
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
 ; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, s1
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ffbh_u32_e32 v0, v1
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
-; GFX8-NEXT:    v_ffbh_u32_e32 v5, v2
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v0, v[1:2]
-; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s4, v0
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v6
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v0, v5
-; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
-; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v2, v2, v5
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_and_b32_e32 v5, 1, v2
-; GFX8-NEXT:    s_mov_b32 s2, 1
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
-; GFX8-NEXT:    flat_store_dword v[3:4], v0
+; GFX8-NEXT:    v_ffbh_u32_e32 v0, v2
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 32, v0, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v5, v[1:2]
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v6, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, 32, v5
+; GFX8-NEXT:    v_ldexp_f32 v2, v6, v2
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -342,121 +229,63 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
 define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
 ; GFX6-LABEL: s_uint_to_fp_v2i64_to_v2f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_movk_i32 s12, 0xff
-; GFX6-NEXT:    s_movk_i32 s13, 0xbe
-; GFX6-NEXT:    s_mov_b32 s10, 1
-; GFX6-NEXT:    s_mov_b32 s11, s9
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_flbit_i32_b32 s14, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s15, s3
-; GFX6-NEXT:    s_flbit_i32_b32 s16, s0
-; GFX6-NEXT:    s_flbit_i32_b32 s17, s1
-; GFX6-NEXT:    s_add_i32 s14, s14, 32
-; GFX6-NEXT:    v_mov_b32_e32 v0, s15
-; GFX6-NEXT:    s_add_i32 s15, s16, 32
-; GFX6-NEXT:    v_mov_b32_e32 v1, s17
-; GFX6-NEXT:    v_mov_b32_e32 v2, s14
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
-; GFX6-NEXT:    v_mov_b32_e32 v0, s15
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, v1, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s13, v2
-; GFX6-NEXT:    v_lshl_b64 v[2:3], s[0:1], v4
-; GFX6-NEXT:    v_sub_i32_e32 v7, vcc, s13, v4
-; GFX6-NEXT:    v_and_b32_e32 v5, s12, v1
-; GFX6-NEXT:    v_mov_b32_e32 v4, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
-; GFX6-NEXT:    v_bfe_u32 v8, v1, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v1, s12, v3
-; GFX6-NEXT:    v_mov_b32_e32 v0, v2
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v7, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v3, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
-; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
-; GFX6-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v6
-; GFX6-NEXT:    v_and_b32_e32 v7, 1, v2
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v7, vcc
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    s_flbit_i32_b32 s8, s7
+; GFX6-NEXT:    s_flbit_i32_b32 s9, s5
+; GFX6-NEXT:    v_mov_b32_e32 v0, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s7, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 32, v0, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s9
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 32, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[6:7], v2
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 32, v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[4:5], v4
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 32, v4
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v3, v2
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, v1
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v0, v5
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v2, v4
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_uint_to_fp_v2i64_to_v2f32:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
-; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
-; GFX8-NEXT:    s_movk_i32 s10, 0xbe
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    s_movk_i32 s13, 0xff
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_flbit_i32_b32 s8, s2
-; GFX8-NEXT:    s_add_i32 s8, s8, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s9, s3
-; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX8-NEXT:    s_cselect_b32 s8, s8, s9
-; GFX8-NEXT:    s_sub_i32 s9, s10, s8
-; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; GFX8-NEXT:    s_cselect_b32 s9, s9, 0
-; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
-; GFX8-NEXT:    s_bfe_u32 s8, s3, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s9, s9, 23
-; GFX8-NEXT:    s_or_b32 s11, s9, s8
-; GFX8-NEXT:    s_mov_b32 s8, 1
-; GFX8-NEXT:    s_mov_b32 s9, s5
-; GFX8-NEXT:    v_mov_b32_e32 v0, s8
-; GFX8-NEXT:    s_and_b32 s3, s3, s13
-; GFX8-NEXT:    v_mov_b32_e32 v1, s9
-; GFX8-NEXT:    s_and_b32 s12, s11, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
-; GFX8-NEXT:    s_cselect_b32 s2, s12, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    s_add_i32 s11, s11, s2
-; GFX8-NEXT:    s_flbit_i32_b32 s2, s0
-; GFX8-NEXT:    s_add_i32 s2, s2, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s3, s1
-; GFX8-NEXT:    s_cmp_eq_u32 s1, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, s3
-; GFX8-NEXT:    s_sub_i32 s3, s10, s2
-; GFX8-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX8-NEXT:    s_cselect_b32 s3, s3, 0
-; GFX8-NEXT:    s_lshl_b64 s[0:1], s[0:1], s2
-; GFX8-NEXT:    s_bfe_u32 s2, s1, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s3, s3, 23
-; GFX8-NEXT:    s_or_b32 s2, s3, s2
-; GFX8-NEXT:    s_and_b32 s1, s1, s13
-; GFX8-NEXT:    s_and_b32 s3, s2, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[0:1], s[4:5]
-; GFX8-NEXT:    s_cselect_b32 s0, s3, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
-; GFX8-NEXT:    s_add_i32 s2, s2, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s6
-; GFX8-NEXT:    v_mov_b32_e32 v0, s2
-; GFX8-NEXT:    v_mov_b32_e32 v1, s11
-; GFX8-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8-NEXT:    s_flbit_i32_b32 s2, s7
+; GFX8-NEXT:    s_cmp_lg_u32 s7, 0
+; GFX8-NEXT:    s_cselect_b32 s9, s2, 32
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[6:7], s9
+; GFX8-NEXT:    s_sub_i32 s9, 32, s9
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[6:7], s2, 0
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s5
+; GFX8-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    s_cselect_b32 s6, s8, 32
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[4:5], s6
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v2, v1
+; GFX8-NEXT:    s_sub_i32 s2, 32, s6
+; GFX8-NEXT:    v_ldexp_f32 v1, v0, s9
+; GFX8-NEXT:    v_ldexp_f32 v0, v2, s2
+; GFX8-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX8-NEXT:    s_endpgm
   %result = uitofp <2 x i64> %in to <2 x float>
@@ -467,213 +296,123 @@ define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)*
 define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
 ; GFX6-LABEL: v_uint_to_fp_v4i64_to_v4f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s11, 0xf000
-; GFX6-NEXT:    s_mov_b32 s10, 0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v7, 5, v0
-; GFX6-NEXT:    v_mov_b32_e32 v8, 0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v9, 4, v0
-; GFX6-NEXT:    v_mov_b32_e32 v15, 0xff
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v9, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX6-NEXT:    buffer_load_dwordx4 v[0:3], v[7:8], s[8:11], 0 addr64
-; GFX6-NEXT:    buffer_load_dwordx4 v[4:7], v[7:8], s[8:11], 0 addr64 offset:16
-; GFX6-NEXT:    s_movk_i32 s2, 0xbe
-; GFX6-NEXT:    s_mov_b64 s[6:7], s[10:11]
-; GFX6-NEXT:    s_movk_i32 s11, 0x80
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v8
-; GFX6-NEXT:    s_mov_b32 s9, s11
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
+; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 4, v0
+; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_waitcnt vmcnt(1)
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v2
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
-; GFX6-NEXT:    v_ffbh_u32_e32 v11, v7
-; GFX6-NEXT:    v_ffbh_u32_e32 v12, v4
-; GFX6-NEXT:    v_ffbh_u32_e32 v13, v5
-; GFX6-NEXT:    v_ffbh_u32_e32 v14, v2
-; GFX6-NEXT:    v_ffbh_u32_e32 v16, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v17, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v18, v1
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_add_i32_e32 v12, vcc, 32, v12
-; GFX6-NEXT:    v_add_i32_e32 v14, vcc, 32, v14
-; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v17
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX6-NEXT:    v_cndmask_b32_e32 v8, v11, v8, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX6-NEXT:    v_cndmask_b32_e32 v19, v13, v12, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, v16, v14, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, v18, v17, vcc
-; GFX6-NEXT:    v_lshl_b64 v[11:12], v[6:7], v8
-; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
-; GFX6-NEXT:    v_lshl_b64 v[13:14], v[2:3], v16
-; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s2, v16
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX6-NEXT:    v_sub_i32_e64 v18, s[0:1], s2, v17
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[0:1]
-; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v17
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v12
-; GFX6-NEXT:    v_mov_b32_e32 v2, v11
-; GFX6-NEXT:    v_bfe_u32 v17, v12, 8, 23
-; GFX6-NEXT:    v_lshl_b64 v[11:12], v[4:5], v19
-; GFX6-NEXT:    v_sub_i32_e64 v19, s[2:3], s2, v19
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s[2:3]
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[4:5]
-; GFX6-NEXT:    v_and_b32_e32 v5, v15, v12
-; GFX6-NEXT:    v_mov_b32_e32 v4, v11
-; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[2:3]
-; GFX6-NEXT:    v_bfe_u32 v20, v12, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v7, v15, v14
-; GFX6-NEXT:    v_mov_b32_e32 v6, v13
-; GFX6-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc
-; GFX6-NEXT:    v_bfe_u32 v14, v14, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v12, v15, v1
-; GFX6-NEXT:    v_mov_b32_e32 v11, v0
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v18, s[0:1]
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 23, v8
-; GFX6-NEXT:    v_lshlrev_b32_e32 v15, 23, v19
-; GFX6-NEXT:    v_lshlrev_b32_e32 v13, 23, v13
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v8, v8, v17
-; GFX6-NEXT:    v_or_b32_e32 v15, v15, v20
-; GFX6-NEXT:    v_or_b32_e32 v13, v13, v14
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v8
-; GFX6-NEXT:    v_and_b32_e32 v14, 1, v15
-; GFX6-NEXT:    v_and_b32_e32 v16, 1, v13
-; GFX6-NEXT:    v_and_b32_e32 v17, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[11:12]
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v14, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 1, v16, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[11:12]
-; GFX6-NEXT:    v_cndmask_b32_e32 v5, 1, v17, vcc
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v8, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v15, v2
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v13, v4
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v5
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[9:10], s[4:7], 0 addr64
+; GFX6-NEXT:    v_ffbh_u32_e32 v12, v8
+; GFX6-NEXT:    v_ffbh_u32_e32 v13, v6
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e32 v9, 32, v9, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, 32, v13, vcc
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v14, vcc, 32, v0
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[1:2], v9
+; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 32, v9
+; GFX6-NEXT:    v_lshl_b64 v[7:8], v[7:8], v12
+; GFX6-NEXT:    v_sub_i32_e32 v9, vcc, 32, v12
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[5:6], v13
+; GFX6-NEXT:    v_sub_i32_e32 v12, vcc, 32, v13
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v8, v7
+; GFX6-NEXT:    v_or_b32_e32 v4, v6, v5
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, v4
+; GFX6-NEXT:    v_ldexp_f32_e32 v3, v3, v14
+; GFX6-NEXT:    v_ldexp_f32_e32 v2, v0, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v9
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v4, v12
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[10:11], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: v_uint_to_fp_v4i64_to_v4f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
-; GFX8-NEXT:    s_movk_i32 s6, 0xbe
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s2, v1
-; GFX8-NEXT:    v_mov_b32_e32 v3, s3
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v3, v2, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s0, v0
-; GFX8-NEXT:    v_addc_u32_e32 v9, vcc, v1, v2, vcc
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 16, v4
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GFX8-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; GFX8-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_movk_i32 s5, 0x80
-; GFX8-NEXT:    v_mov_b32_e32 v14, 0xff
-; GFX8-NEXT:    s_mov_b32 s2, 1
-; GFX8-NEXT:    s_mov_b32 s3, s5
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v10, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
+; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, s0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v11, s1
+; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, v11, v10, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_ffbh_u32_e32 v15, v2
-; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ffbh_u32_e32 v10, v6
 ; GFX8-NEXT:    v_ffbh_u32_e32 v12, v4
-; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v10
-; GFX8-NEXT:    v_add_u32_e32 v12, vcc, 32, v12
-; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 32, v15
-; GFX8-NEXT:    v_ffbh_u32_e32 v11, v7
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX8-NEXT:    v_cndmask_b32_e32 v17, v11, v10, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v13, v5
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX8-NEXT:    v_cndmask_b32_e32 v18, v13, v12, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v16, v3
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v16, v15, vcc
-; GFX8-NEXT:    v_lshlrev_b64 v[10:11], v17, v[6:7]
-; GFX8-NEXT:    v_sub_u32_e32 v16, vcc, s6, v17
-; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s6, v18
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX8-NEXT:    v_lshlrev_b64 v[12:13], v18, v[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v16, 0, v16, s[0:1]
-; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v15, v[2:3]
-; GFX8-NEXT:    v_sub_u32_e64 v15, s[0:1], s6, v15
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v15, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v6, v10
-; GFX8-NEXT:    v_and_b32_e32 v7, 0xff, v11
-; GFX8-NEXT:    v_bfe_u32 v18, v11, 8, 23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
-; GFX8-NEXT:    v_mov_b32_e32 v10, v12
-; GFX8-NEXT:    v_mov_b32_e32 v12, v4
-; GFX8-NEXT:    v_or_b32_e32 v16, v16, v18
-; GFX8-NEXT:    v_bfe_u32 v4, v5, 8, 23
-; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[6:7]
-; GFX8-NEXT:    v_or_b32_e32 v4, v2, v4
-; GFX8-NEXT:    v_and_b32_e32 v2, 1, v16
-; GFX8-NEXT:    v_and_b32_e32 v11, v14, v13
-; GFX8-NEXT:    v_bfe_u32 v19, v13, 8, 23
-; GFX8-NEXT:    v_and_b32_e32 v13, v14, v5
-; GFX8-NEXT:    v_ffbh_u32_e32 v5, v0
-; GFX8-NEXT:    v_add_u32_e64 v5, s[0:1], 32, v5
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v17
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[6:7]
-; GFX8-NEXT:    v_or_b32_e32 v15, v3, v19
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[10:11]
-; GFX8-NEXT:    v_and_b32_e32 v3, 1, v15
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 1, v2, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[12:13]
-; GFX8-NEXT:    v_and_b32_e32 v6, 1, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v7, 1, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, v6, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v10, 1, v6, vcc
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v16, v2
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v15, v7
-; GFX8-NEXT:    v_ffbh_u32_e32 v18, v1
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v12, v18, v5, vcc
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v4, v10
-; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s6, v12
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX8-NEXT:    v_lshlrev_b64 v[6:7], v12, v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
-; GFX8-NEXT:    v_and_b32_e32 v11, v14, v7
-; GFX8-NEXT:    v_mov_b32_e32 v10, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX8-NEXT:    v_bfe_u32 v1, v7, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
-; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[10:11]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v1
-; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ffbh_u32_e32 v0, v8
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v11, v6
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v11, 32, v11, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v13, v2
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v13, 32, v13, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[7:8], v0, v[7:8]
+; GFX8-NEXT:    v_sub_u32_e32 v14, vcc, 32, v0
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v11, v[5:6]
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v12, v[3:4]
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v13, v[1:2]
+; GFX8-NEXT:    v_sub_u32_e32 v11, vcc, 32, v11
+; GFX8-NEXT:    v_sub_u32_e32 v12, vcc, 32, v12
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, 32, v13
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    v_or_b32_e32 v5, v6, v5
+; GFX8-NEXT:    v_or_b32_e32 v7, v8, v7
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v7
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v4, v5
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v5, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v14
+; GFX8-NEXT:    v_ldexp_f32 v3, v3, v12
+; GFX8-NEXT:    v_ldexp_f32 v0, v4, v11
+; GFX8-NEXT:    v_ldexp_f32 v2, v5, v2
+; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[0:3]
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
@@ -687,126 +426,68 @@ define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)*
 define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
 ; GFX6-LABEL: s_uint_to_fp_v2i64_to_v2f16:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s8, 0
-; GFX6-NEXT:    s_movk_i32 s9, 0x80
-; GFX6-NEXT:    s_movk_i32 s12, 0xff
-; GFX6-NEXT:    s_movk_i32 s13, 0xbe
-; GFX6-NEXT:    s_mov_b32 s10, 1
-; GFX6-NEXT:    s_mov_b32 s11, s9
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_flbit_i32_b32 s14, s2
-; GFX6-NEXT:    s_flbit_i32_b32 s15, s3
-; GFX6-NEXT:    s_flbit_i32_b32 s16, s0
-; GFX6-NEXT:    s_flbit_i32_b32 s17, s1
-; GFX6-NEXT:    s_add_i32 s14, s14, 32
-; GFX6-NEXT:    v_mov_b32_e32 v0, s15
-; GFX6-NEXT:    s_add_i32 s15, s16, 32
-; GFX6-NEXT:    v_mov_b32_e32 v1, s17
-; GFX6-NEXT:    v_mov_b32_e32 v2, s14
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
-; GFX6-NEXT:    v_mov_b32_e32 v0, s15
-; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, v1, v0, vcc
-; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
-; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s13, v2
-; GFX6-NEXT:    v_lshl_b64 v[2:3], s[0:1], v4
-; GFX6-NEXT:    v_sub_i32_e32 v7, vcc, s13, v4
-; GFX6-NEXT:    v_and_b32_e32 v5, s12, v1
-; GFX6-NEXT:    v_mov_b32_e32 v4, v0
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
-; GFX6-NEXT:    v_bfe_u32 v8, v1, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v1, s12, v3
-; GFX6-NEXT:    v_mov_b32_e32 v0, v2
-; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v7, vcc
-; GFX6-NEXT:    v_bfe_u32 v3, v3, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
-; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
-; GFX6-NEXT:    v_or_b32_e32 v6, v6, v8
-; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT:    v_and_b32_e32 v3, 1, v6
-; GFX6-NEXT:    v_and_b32_e32 v7, 1, v2
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v7, vcc
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    s_flbit_i32_b32 s8, s7
+; GFX6-NEXT:    s_flbit_i32_b32 s9, s5
+; GFX6-NEXT:    v_mov_b32_e32 v0, s8
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s7, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 32, v0, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s9
+; GFX6-NEXT:    v_cmp_ne_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 32, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[6:7], v2
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 32, v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[4:5], v4
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 32, v4
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v3, v2
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v5
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v4
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: s_uint_to_fp_v2i64_to_v2f16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
-; GFX8-NEXT:    s_movk_i32 s10, 0xbe
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_movk_i32 s13, 0xff
-; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_flbit_i32_b32 s8, s6
-; GFX8-NEXT:    s_add_i32 s8, s8, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s9, s7
-; GFX8-NEXT:    s_cmp_eq_u32 s7, 0
-; GFX8-NEXT:    s_cselect_b32 s8, s8, s9
-; GFX8-NEXT:    s_sub_i32 s9, s10, s8
-; GFX8-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX8-NEXT:    s_cselect_b32 s9, s9, 0
-; GFX8-NEXT:    s_lshl_b64 s[6:7], s[6:7], s8
-; GFX8-NEXT:    s_bfe_u32 s8, s7, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s9, s9, 23
-; GFX8-NEXT:    s_or_b32 s11, s9, s8
-; GFX8-NEXT:    s_mov_b32 s8, 1
-; GFX8-NEXT:    s_mov_b32 s9, s3
-; GFX8-NEXT:    v_mov_b32_e32 v0, s8
-; GFX8-NEXT:    s_and_b32 s7, s7, s13
-; GFX8-NEXT:    v_mov_b32_e32 v1, s9
-; GFX8-NEXT:    s_and_b32 s12, s11, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[6:7], s[2:3]
-; GFX8-NEXT:    s_cselect_b32 s6, s12, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, 1
-; GFX8-NEXT:    s_add_i32 s11, s11, s6
-; GFX8-NEXT:    s_flbit_i32_b32 s6, s4
-; GFX8-NEXT:    s_add_i32 s6, s6, 32
-; GFX8-NEXT:    s_flbit_i32_b32 s7, s5
-; GFX8-NEXT:    s_cmp_eq_u32 s5, 0
-; GFX8-NEXT:    s_cselect_b32 s6, s6, s7
-; GFX8-NEXT:    s_sub_i32 s7, s10, s6
-; GFX8-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX8-NEXT:    s_cselect_b32 s7, s7, 0
-; GFX8-NEXT:    s_lshl_b64 s[4:5], s[4:5], s6
-; GFX8-NEXT:    s_bfe_u32 s6, s5, 0x170008
-; GFX8-NEXT:    s_lshl_b32 s7, s7, 23
-; GFX8-NEXT:    s_or_b32 s6, s7, s6
-; GFX8-NEXT:    s_and_b32 s5, s5, s13
-; GFX8-NEXT:    s_and_b32 s7, s6, 1
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX8-NEXT:    s_cmp_eq_u64 s[4:5], s[2:3]
-; GFX8-NEXT:    s_cselect_b32 s2, s7, 0
-; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
-; GFX8-NEXT:    v_mov_b32_e32 v2, s11
-; GFX8-NEXT:    s_add_i32 s6, s6, s2
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, s6
-; GFX8-NEXT:    v_or_b32_e32 v2, v0, v2
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_flbit_i32_b32 s2, s7
+; GFX8-NEXT:    s_cmp_lg_u32 s7, 0
+; GFX8-NEXT:    s_cselect_b32 s9, s2, 32
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[6:7], s9
+; GFX8-NEXT:    s_sub_i32 s9, 32, s9
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[6:7], s2, 0
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s5
+; GFX8-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX8-NEXT:    s_cselect_b32 s6, s8, 32
+; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[4:5], s6
+; GFX8-NEXT:    v_cmp_ne_u32_e64 s[4:5], s2, 0
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX8-NEXT:    s_sub_i32 s2, 32, s6
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, s9
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, s2
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX8-NEXT:    v_or_b32_e32 v2, v1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    flat_store_dword v[0:1], v2
@@ -819,226 +500,136 @@ define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)*
 define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
 ; GFX6-LABEL: v_uint_to_fp_v4i64_to_v4f16:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s11, 0xf000
-; GFX6-NEXT:    s_mov_b32 s10, 0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v7, 5, v0
-; GFX6-NEXT:    v_mov_b32_e32 v8, 0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v9, 3, v0
-; GFX6-NEXT:    v_mov_b32_e32 v15, 0xff
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v9, 0
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX6-NEXT:    buffer_load_dwordx4 v[0:3], v[7:8], s[8:11], 0 addr64
-; GFX6-NEXT:    buffer_load_dwordx4 v[4:7], v[7:8], s[8:11], 0 addr64 offset:16
-; GFX6-NEXT:    s_movk_i32 s2, 0xbe
-; GFX6-NEXT:    s_mov_b64 s[6:7], s[10:11]
-; GFX6-NEXT:    s_movk_i32 s11, 0x80
-; GFX6-NEXT:    s_mov_b32 s8, 1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v8
-; GFX6-NEXT:    s_mov_b32 s9, s11
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
+; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_waitcnt vmcnt(1)
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v2
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
-; GFX6-NEXT:    v_ffbh_u32_e32 v11, v7
-; GFX6-NEXT:    v_ffbh_u32_e32 v12, v4
-; GFX6-NEXT:    v_ffbh_u32_e32 v13, v5
-; GFX6-NEXT:    v_ffbh_u32_e32 v14, v2
-; GFX6-NEXT:    v_ffbh_u32_e32 v16, v3
-; GFX6-NEXT:    v_ffbh_u32_e32 v17, v0
-; GFX6-NEXT:    v_ffbh_u32_e32 v18, v1
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
-; GFX6-NEXT:    v_add_i32_e32 v12, vcc, 32, v12
-; GFX6-NEXT:    v_add_i32_e32 v14, vcc, 32, v14
-; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v17
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GFX6-NEXT:    v_cndmask_b32_e32 v8, v11, v8, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
-; GFX6-NEXT:    v_cndmask_b32_e32 v19, v13, v12, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, v16, v14, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, v18, v17, vcc
-; GFX6-NEXT:    v_lshl_b64 v[11:12], v[6:7], v8
-; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
-; GFX6-NEXT:    v_lshl_b64 v[13:14], v[2:3], v16
-; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s2, v16
-; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; GFX6-NEXT:    v_sub_i32_e64 v18, s[0:1], s2, v17
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[0:1]
-; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v17
-; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v12
-; GFX6-NEXT:    v_mov_b32_e32 v2, v11
-; GFX6-NEXT:    v_bfe_u32 v17, v12, 8, 23
-; GFX6-NEXT:    v_lshl_b64 v[11:12], v[4:5], v19
-; GFX6-NEXT:    v_sub_i32_e64 v19, s[2:3], s2, v19
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s[2:3]
-; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[4:5]
-; GFX6-NEXT:    v_and_b32_e32 v5, v15, v12
-; GFX6-NEXT:    v_mov_b32_e32 v4, v11
-; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[2:3]
-; GFX6-NEXT:    v_bfe_u32 v20, v12, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v7, v15, v14
-; GFX6-NEXT:    v_mov_b32_e32 v6, v13
-; GFX6-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc
-; GFX6-NEXT:    v_bfe_u32 v14, v14, 8, 23
-; GFX6-NEXT:    v_and_b32_e32 v12, v15, v1
-; GFX6-NEXT:    v_mov_b32_e32 v11, v0
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v18, s[0:1]
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
-; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 23, v8
-; GFX6-NEXT:    v_lshlrev_b32_e32 v15, 23, v19
-; GFX6-NEXT:    v_lshlrev_b32_e32 v13, 23, v13
-; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
-; GFX6-NEXT:    v_or_b32_e32 v8, v8, v17
-; GFX6-NEXT:    v_or_b32_e32 v15, v15, v20
-; GFX6-NEXT:    v_or_b32_e32 v13, v13, v14
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 1, v8
-; GFX6-NEXT:    v_and_b32_e32 v14, 1, v15
-; GFX6-NEXT:    v_and_b32_e32 v16, 1, v13
-; GFX6-NEXT:    v_and_b32_e32 v17, 1, v0
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[11:12]
-; GFX6-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v14, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[6:7]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v16, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[11:12]
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 1, v17, vcc
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v15, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v13, v3
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT:    v_ffbh_u32_e32 v12, v8
+; GFX6-NEXT:    v_ffbh_u32_e32 v13, v6
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX6-NEXT:    v_cndmask_b32_e32 v9, 32, v9, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, 32, v13, vcc
+; GFX6-NEXT:    v_lshl_b64 v[3:4], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v14, vcc, 32, v0
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[1:2], v9
+; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 32, v9
+; GFX6-NEXT:    v_lshl_b64 v[7:8], v[7:8], v12
+; GFX6-NEXT:    v_sub_i32_e32 v9, vcc, 32, v12
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[5:6], v13
+; GFX6-NEXT:    v_sub_i32_e32 v12, vcc, 32, v13
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v8, v7
+; GFX6-NEXT:    v_or_b32_e32 v4, v6, v5
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, v4
+; GFX6-NEXT:    v_ldexp_f32_e32 v3, v3, v14
+; GFX6-NEXT:    v_ldexp_f32_e32 v0, v0, v2
+; GFX6-NEXT:    v_ldexp_f32_e32 v1, v1, v9
+; GFX6-NEXT:    v_ldexp_f32_e32 v2, v4, v12
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v3, v3
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[9:10], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v0, v3
+; GFX6-NEXT:    v_or_b32_e32 v0, v2, v4
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[10:11], s[0:3], 0 addr64
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: v_uint_to_fp_v4i64_to_v4f16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
-; GFX8-NEXT:    v_mov_b32_e32 v13, 0
-; GFX8-NEXT:    s_movk_i32 s8, 0xbe
-; GFX8-NEXT:    v_mov_b32_e32 v14, 0xff
+; GFX8-NEXT:    v_mov_b32_e32 v9, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v10, 3, v0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s6, v1
-; GFX8-NEXT:    v_mov_b32_e32 v2, s7
-; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v13, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v9, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
 ; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
 ; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
 ; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
-; GFX8-NEXT:    s_mov_b32 s2, 0
-; GFX8-NEXT:    s_movk_i32 s3, 0x80
-; GFX8-NEXT:    s_mov_b32 s6, 1
-; GFX8-NEXT:    s_mov_b32 s7, s3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX8-NEXT:    v_mov_b32_e32 v11, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_ffbh_u32_e32 v15, v3
+; GFX8-NEXT:    v_ffbh_u32_e32 v13, v4
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_ffbh_u32_e32 v9, v7
-; GFX8-NEXT:    v_ffbh_u32_e32 v11, v5
-; GFX8-NEXT:    v_ffbh_u32_e32 v17, v1
-; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 32, v9
-; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 32, v11
-; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 32, v15
-; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 32, v17
-; GFX8-NEXT:    v_ffbh_u32_e32 v10, v8
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v8
-; GFX8-NEXT:    v_cndmask_b32_e32 v19, v10, v9, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v0, v8
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
 ; GFX8-NEXT:    v_ffbh_u32_e32 v12, v6
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
-; GFX8-NEXT:    v_cndmask_b32_e32 v20, v12, v11, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v16, v4
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v15, v16, v15, vcc
-; GFX8-NEXT:    v_ffbh_u32_e32 v18, v2
-; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX8-NEXT:    v_cndmask_b32_e32 v16, v18, v17, vcc
-; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s8, v19
-; GFX8-NEXT:    v_sub_u32_e32 v18, vcc, s8, v20
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[5:6]
-; GFX8-NEXT:    v_lshlrev_b64 v[11:12], v20, v[5:6]
-; GFX8-NEXT:    v_lshlrev_b64 v[9:10], v19, v[7:8]
-; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[7:8]
-; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v15, v[3:4]
-; GFX8-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
-; GFX8-NEXT:    v_mov_b32_e32 v7, v9
-; GFX8-NEXT:    v_mov_b32_e32 v9, v11
-; GFX8-NEXT:    v_mov_b32_e32 v11, v5
-; GFX8-NEXT:    v_and_b32_e32 v8, 0xff, v10
-; GFX8-NEXT:    v_bfe_u32 v19, v10, 8, 23
-; GFX8-NEXT:    v_and_b32_e32 v10, v14, v12
-; GFX8-NEXT:    v_bfe_u32 v20, v12, 8, 23
-; GFX8-NEXT:    v_and_b32_e32 v12, v14, v6
-; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, v17, s[0:1]
-; GFX8-NEXT:    v_sub_u32_e64 v15, s[0:1], s8, v15
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v15, vcc
-; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v3
-; GFX8-NEXT:    v_or_b32_e32 v6, v6, v19
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[7:8]
-; GFX8-NEXT:    v_or_b32_e32 v3, v3, v5
-; GFX8-NEXT:    v_and_b32_e32 v5, 1, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 23, v18
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
-; GFX8-NEXT:    v_or_b32_e32 v4, v4, v20
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[9:10]
-; GFX8-NEXT:    v_and_b32_e32 v15, 1, v4
-; GFX8-NEXT:    v_cndmask_b32_e32 v5, 1, v5, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[9:10]
-; GFX8-NEXT:    v_cndmask_b32_e64 v8, 0, v15, s[0:1]
-; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[11:12]
-; GFX8-NEXT:    v_and_b32_e32 v7, 1, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v8, 1, v8, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[11:12]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, 0, v7, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e32 v7, 1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v8
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v6, v5
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v3, v7
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v8, v4
-; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v16, v[1:2]
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v7, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_mov_b32_e32 v5, v3
-; GFX8-NEXT:    v_sub_u32_e32 v3, vcc, s8, v16
-; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v9, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v3, vcc
-; GFX8-NEXT:    v_and_b32_e32 v6, v14, v4
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 23, v1
-; GFX8-NEXT:    v_bfe_u32 v2, v4, 8, 23
-; GFX8-NEXT:    v_or_b32_e32 v1, v1, v2
-; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[5:6]
-; GFX8-NEXT:    v_and_b32_e32 v2, 1, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[5:6]
-; GFX8-NEXT:    v_mov_b32_e32 v17, s5
-; GFX8-NEXT:    v_cndmask_b32_e32 v2, 1, v2, vcc
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
-; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v1
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v0
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v17, v13, vcc
-; GFX8-NEXT:    v_or_b32_e32 v2, v8, v7
-; GFX8-NEXT:    v_or_b32_e32 v3, v3, v9
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v13, 32, v13, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v14, v2
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, 32, v14, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[7:8], v0, v[7:8]
+; GFX8-NEXT:    v_sub_u32_e32 v15, vcc, 32, v0
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v12, v[5:6]
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v13, v[3:4]
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v14, v[1:2]
+; GFX8-NEXT:    v_sub_u32_e32 v12, vcc, 32, v12
+; GFX8-NEXT:    v_sub_u32_e32 v13, vcc, 32, v13
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, 32, v14
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v7, v8, v7
+; GFX8-NEXT:    v_or_b32_e32 v5, v6, v5
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v1, v7
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v4, v5
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX8-NEXT:    v_ldexp_f32 v1, v1, v15
+; GFX8-NEXT:    v_ldexp_f32 v4, v4, v12
+; GFX8-NEXT:    v_ldexp_f32 v3, v3, v13
+; GFX8-NEXT:    v_ldexp_f32 v0, v0, v2
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v5, v0
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v3, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v10
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v11, v9, vcc
+; GFX8-NEXT:    v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT:    v_or_b32_e32 v2, v4, v2
 ; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()

diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
index 1beddba4d22e0..ae11d56ec02ee 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
@@ -118,13 +118,8 @@ define amdgpu_kernel void @v_uint_to_fp_i1_f32_load(float addrspace(1)* %out, i1
 ; FIXME: Repeated here to test r600
 ; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f32:
 ; R600: FFBH_UINT
-; R600: FFBH_UINT
-; R600: CNDE_INT
 ; R600: CNDE_INT
-
-; R600-DAG: SETGT_UINT
-; R600-DAG: SETGT_UINT
-; R600-DAG: SETE_INT
+; R600: UINT_TO_FLT
 
 define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
 entry:
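
For anyone eyeballing the updated checks: the new sequences above compute
ffbh_u32 on the high word, do a 64-bit left shift, fold any nonzero low bits
into a sticky bit, convert with the native v_cvt_f32_u32, and scale the
result back with v_ldexp_f32. A minimal host-side C sketch of that pattern
(uitofp_via_u32 and the sample values are illustrative, not from the patch;
__builtin_clz assumes GCC/Clang) is:

#include <assert.h>
#include <math.h>
#include <stdint.h>

/* Hypothetical helper (not from the patch): models the lowering the
 * updated checks exercise -- clz of the high word, 64-bit shift, sticky
 * bit from the low word, native u32->f32 convert, ldexp to scale back. */
static float uitofp_via_u32(uint64_t u) {
  uint32_t hi = (uint32_t)(u >> 32);
  /* v_ffbh_u32 + v_cndmask in the GFX8 output: 32 when hi == 0. */
  int shamt = hi ? __builtin_clz(hi) : 32;
  u <<= shamt;
  hi = (uint32_t)(u >> 32);
  uint32_t lo = (uint32_t)u;
  hi |= (lo != 0); /* sticky bit so the 32-bit convert rounds correctly */
  /* v_cvt_f32_u32 followed by v_ldexp_f32. */
  return ldexpf((float)hi, 32 - shamt);
}

int main(void) {
  uint64_t samples[] = {0u, 1u, 0x8000000000ull, 0x123456789abcdefull,
                        0xffffffffffffffffull};
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
    assert(uitofp_via_u32(samples[i]) == (float)samples[i]);
  return 0;
}

The asserts pass because the sticky bit preserves round-to-nearest-even
across the two-step conversion, which is why the old per-bit rounding
checks (SETGT_UINT/SETE_INT on R600, the v_cmp_eq_u64 chains on GFX8)
could be dropped from the tests.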
