[llvm] 027d3b7 - [AMDGPU] Generate checks for i64 to fp conversions

Jay Foad via llvm-commits <llvm-commits at lists.llvm.org>
Wed Aug 4 07:49:29 PDT 2021


Author: Jay Foad
Date: 2021-08-04T15:39:46+01:00
New Revision: 027d3b747e7d8e82d9cc35f8b3689fec5fd09779

URL: https://github.com/llvm/llvm-project/commit/027d3b747e7d8e82d9cc35f8b3689fec5fd09779
DIFF: https://github.com/llvm/llvm-project/commit/027d3b747e7d8e82d9cc35f8b3689fec5fd09779.diff

LOG: [AMDGPU] Generate checks for i64 to fp conversions

Differential Revision: https://reviews.llvm.org/D107429
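
For reference, checks like the ones added below are normally produced by the
script named in the autogenerated NOTE line of the updated tests,
utils/update_llc_test_checks.py. A minimal sketch of a regeneration command is
shown here; the --llc-binary path into a local build tree is an assumption and
is not part of this commit:

    # Hypothetical invocation; point --llc-binary at the llc from your own build.
    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll \
        llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll

The script reruns each RUN line and rewrites the per-prefix CHECK blocks
(GFX6/GFX8 below) from the resulting assembly.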

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
index d22dfcf6c6d8..279be8c658e9 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
@@ -1,33 +1,203 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX6 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX8 %s
 
 ; FIXME: This should be merged with sint_to_fp.ll, but s_sint_to_fp_v2i64 crashes on r600
 
-; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f16:
 define amdgpu_kernel void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+; GFX6-LABEL: s_sint_to_fp_i64_to_f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_mov_b32 s0, s4
+; GFX6-NEXT:    s_mov_b32 s1, s5
+; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_add_u32 s4, s6, s10
+; GFX6-NEXT:    s_addc_u32 s5, s7, s10
+; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
+; GFX6-NEXT:    s_flbit_i32_b32 s6, s4
+; GFX6-NEXT:    s_flbit_i32_b32 s7, s5
+; GFX6-NEXT:    s_add_i32 s6, s6, 32
+; GFX6-NEXT:    s_and_b32 s10, 1, s10
+; GFX6-NEXT:    v_mov_b32_e32 v0, s7
+; GFX6-NEXT:    v_mov_b32_e32 v1, s6
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[4:5], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], s10, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[4:5]
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_sint_to_fp_i64_to_f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    v_mov_b32_e32 v0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_ashr_i32 s6, s3, 31
+; GFX8-NEXT:    s_add_u32 s2, s2, s6
+; GFX8-NEXT:    s_mov_b32 s7, s6
+; GFX8-NEXT:    s_addc_u32 s3, s3, s6
+; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-NEXT:    s_flbit_i32_b32 s7, s2
+; GFX8-NEXT:    s_add_i32 s7, s7, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s7, s7, s8
+; GFX8-NEXT:    s_sub_i32 s8, 0xbe, s7
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s8, s8, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s7
+; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s8, s8, 23
+; GFX8-NEXT:    s_or_b32 s7, s8, s7
+; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX8-NEXT:    s_and_b32 s8, s7, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
+; GFX8-NEXT:    s_cselect_b32 s2, s8, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s2, s7, s2
+; GFX8-NEXT:    s_and_b32 s3, 1, s6
+; GFX8-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[2:3]
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = sitofp i64 %in to half
   store half %result, half addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f16:
-; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; GCN: v_xor_b32
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_cndmask_b32_e64 [[SIGN_SEL:v[0-9]+]], v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN: v_cvt_f16_f32_e32 [[SIGN_SEL_F16:v[0-9]+]], [[SIGN_SEL]]
-; GCN: {{buffer|flat}}_store_short {{.*}}[[SIGN_SEL_F16]]
 define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_sint_to_fp_i64_to_f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    s_mov_b32 s9, s7
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 31, v4
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v0
+; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v4, v0, vcc
+; GFX6-NEXT:    v_and_b32_e32 v9, 1, v0
+; GFX6-NEXT:    v_xor_b32_e32 v4, v4, v0
+; GFX6-NEXT:    v_xor_b32_e32 v3, v3, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
+; GFX6-NEXT:    v_mov_b32_e32 v7, v5
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    buffer_store_short v0, v[1:2], s[0:3], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_sint_to_fp_i64_to_f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 1, v0
+; GFX8-NEXT:    s_movk_i32 s6, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_mov_b32 s4, 1
+; GFX8-NEXT:    s_mov_b32 s5, s3
+; GFX8-NEXT:    v_mov_b32_e32 v6, s1
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v0, 31, v2
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v1, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v0, vcc
+; GFX8-NEXT:    v_and_b32_e32 v7, 1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v0, v3, v0
+; GFX8-NEXT:    v_ffbh_u32_e32 v2, v0
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
+; GFX8-NEXT:    v_ffbh_u32_e32 v3, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v8, v3, v2, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v8, v[0:1]
+; GFX8-NEXT:    v_sub_u32_e32 v8, vcc, s6, v8
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v3
+; GFX8-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v0, v2
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v8
+; GFX8-NEXT:    v_bfe_u32 v3, v3, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v5
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v6, v4, vcc
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
@@ -37,30 +207,194 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f32:
 define amdgpu_kernel void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
+; GFX6-LABEL: s_sint_to_fp_i64_to_f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_mov_b32 s0, s4
+; GFX6-NEXT:    s_mov_b32 s1, s5
+; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_add_u32 s4, s6, s10
+; GFX6-NEXT:    s_addc_u32 s5, s7, s10
+; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
+; GFX6-NEXT:    s_flbit_i32_b32 s6, s4
+; GFX6-NEXT:    s_flbit_i32_b32 s7, s5
+; GFX6-NEXT:    s_add_i32 s6, s6, 32
+; GFX6-NEXT:    s_and_b32 s10, 1, s10
+; GFX6-NEXT:    v_mov_b32_e32 v0, s7
+; GFX6-NEXT:    v_mov_b32_e32 v1, s6
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s5, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[4:5], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], s10, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[4:5]
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_sint_to_fp_i64_to_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    v_mov_b32_e32 v2, 1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    s_ashr_i32 s0, s3, 31
+; GFX8-NEXT:    s_add_u32 s2, s2, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    s_mov_b32 s1, s0
+; GFX8-NEXT:    s_addc_u32 s3, s3, s0
+; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[0:1]
+; GFX8-NEXT:    s_flbit_i32_b32 s1, s2
+; GFX8-NEXT:    s_add_i32 s1, s1, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s6, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s1, s1, s6
+; GFX8-NEXT:    s_sub_i32 s6, 0xbe, s1
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s1
+; GFX8-NEXT:    s_bfe_u32 s1, s3, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s6, s6, 23
+; GFX8-NEXT:    s_or_b32 s1, s6, s1
+; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX8-NEXT:    s_and_b32 s6, s1, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
+; GFX8-NEXT:    s_cselect_b32 s2, s6, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s1, s1, s2
+; GFX8-NEXT:    s_and_b32 s0, 1, s0
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], s0, 1
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[0:1]
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = sitofp i64 %in to float
   store float %result, float addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; GCN: v_xor_b32
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_cndmask_b32_e64 [[SIGN_SEL:v[0-9]+]], v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN: {{buffer|flat}}_store_dword {{.*}}[[SIGN_SEL]]
 define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_sint_to_fp_i64_to_f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 31, v4
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v0
+; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v4, v0, vcc
+; GFX6-NEXT:    v_and_b32_e32 v9, 1, v0
+; GFX6-NEXT:    v_xor_b32_e32 v4, v4, v0
+; GFX6-NEXT:    v_xor_b32_e32 v3, v3, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
+; GFX6-NEXT:    v_mov_b32_e32 v7, v5
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    s_mov_b32 s6, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v9
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX6-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_sint_to_fp_i64_to_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX8-NEXT:    s_movk_i32 s4, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v0, 31, v2
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v1, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v0, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v0
+; GFX8-NEXT:    v_and_b32_e32 v2, 1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v0, v5, v0
+; GFX8-NEXT:    v_ffbh_u32_e32 v5, v0
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, 32, v5
+; GFX8-NEXT:    v_ffbh_u32_e32 v6, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, v6, v5, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v7, v[0:1]
+; GFX8-NEXT:    v_sub_u32_e32 v7, vcc, s4, v7
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v0, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 23, v7
+; GFX8-NEXT:    v_bfe_u32 v6, v6, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v5, v5, v6
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v6, 1, v5
+; GFX8-NEXT:    s_mov_b32 s2, 1
+; GFX8-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v5, v0
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX8-NEXT:    flat_store_dword v[3:4], v0
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -70,16 +404,439 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f32:
-; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
 define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+; GFX6-LABEL: s_sint_to_fp_v2i64_to_v2f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_movk_i32 s16, 0xff
+; GFX6-NEXT:    s_movk_i32 s17, 0xbe
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
+; GFX6-NEXT:    s_mov_b32 s12, 1
+; GFX6-NEXT:    s_mov_b32 s13, s9
+; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_add_u32 s2, s2, s10
+; GFX6-NEXT:    s_addc_u32 s3, s3, s10
+; GFX6-NEXT:    s_ashr_i32 s14, s1, 31
+; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT:    s_flbit_i32_b32 s11, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s18, s3
+; GFX6-NEXT:    s_add_i32 s11, s11, 32
+; GFX6-NEXT:    s_and_b32 s10, 1, s10
+; GFX6-NEXT:    s_mov_b32 s15, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s18
+; GFX6-NEXT:    v_mov_b32_e32 v1, s11
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    s_add_u32 s0, s0, s14
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s17, v2
+; GFX6-NEXT:    s_addc_u32 s1, s1, s14
+; GFX6-NEXT:    s_and_b32 s11, 1, s14
+; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    s_xor_b64 s[0:1], s[0:1], s[14:15]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    s_flbit_i32_b32 s2, s0
+; GFX6-NEXT:    s_flbit_i32_b32 s3, s1
+; GFX6-NEXT:    v_or_b32_e32 v4, v0, v1
+; GFX6-NEXT:    s_add_i32 s2, s2, 32
+; GFX6-NEXT:    v_mov_b32_e32 v0, s3
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
+; GFX6-NEXT:    v_mov_b32_e32 v5, s2
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v5, v0, v5, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[0:1], v5
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s17, v5
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v6, vcc
+; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v5, vcc
+; GFX6-NEXT:    v_bfe_u32 v5, v1, 8, 23
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v6, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s10, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -v1, s[0:1]
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v5
+; GFX6-NEXT:    v_and_b32_e32 v4, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v4, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[0:1]
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_sint_to_fp_v2i64_to_v2f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX8-NEXT:    s_movk_i32 s12, 0xbe
+; GFX8-NEXT:    s_mov_b32 s8, 0
+; GFX8-NEXT:    s_movk_i32 s9, 0x80
+; GFX8-NEXT:    s_movk_i32 s14, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_ashr_i32 s6, s3, 31
+; GFX8-NEXT:    s_add_u32 s2, s2, s6
+; GFX8-NEXT:    s_mov_b32 s7, s6
+; GFX8-NEXT:    s_addc_u32 s3, s3, s6
+; GFX8-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-NEXT:    s_flbit_i32_b32 s7, s2
+; GFX8-NEXT:    s_add_i32 s7, s7, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s10, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s7, s7, s10
+; GFX8-NEXT:    s_sub_i32 s10, s12, s7
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s10, s10, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s7
+; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s10, s10, 23
+; GFX8-NEXT:    s_or_b32 s7, s10, s7
+; GFX8-NEXT:    s_mov_b32 s10, 1
+; GFX8-NEXT:    s_mov_b32 s11, s9
+; GFX8-NEXT:    v_mov_b32_e32 v0, s10
+; GFX8-NEXT:    s_and_b32 s3, s3, s14
+; GFX8-NEXT:    v_mov_b32_e32 v1, s11
+; GFX8-NEXT:    s_and_b32 s13, s7, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[8:9]
+; GFX8-NEXT:    s_cselect_b32 s2, s13, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s2, s7, s2
+; GFX8-NEXT:    s_and_b32 s3, 1, s6
+; GFX8-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[2:3]
+; GFX8-NEXT:    s_ashr_i32 s2, s1, 31
+; GFX8-NEXT:    s_add_u32 s0, s0, s2
+; GFX8-NEXT:    s_mov_b32 s3, s2
+; GFX8-NEXT:    s_addc_u32 s1, s1, s2
+; GFX8-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX8-NEXT:    s_flbit_i32_b32 s3, s0
+; GFX8-NEXT:    s_add_i32 s3, s3, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s6, s1
+; GFX8-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX8-NEXT:    s_cselect_b32 s3, s3, s6
+; GFX8-NEXT:    s_sub_i32 s6, s12, s3
+; GFX8-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, 0
+; GFX8-NEXT:    s_lshl_b64 s[0:1], s[0:1], s3
+; GFX8-NEXT:    s_bfe_u32 s3, s1, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s6, s6, 23
+; GFX8-NEXT:    s_or_b32 s3, s6, s3
+; GFX8-NEXT:    s_and_b32 s1, s1, s14
+; GFX8-NEXT:    s_and_b32 s6, s3, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[0:1], s[8:9]
+; GFX8-NEXT:    s_cselect_b32 s0, s6, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
+; GFX8-NEXT:    s_add_i32 s0, s3, s0
+; GFX8-NEXT:    s_and_b32 s1, 1, s2
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, 1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s4
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v0, -v0, s[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v4, s5
+; GFX8-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX8-NEXT:    s_endpgm
   %result = sitofp <2 x i64> %in to <2 x float>
   store <2 x float> %result, <2 x float> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f32:
 define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_sint_to_fp_v4i64_to_v4f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v9, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[10:11]
+; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
+; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 4, v0
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_waitcnt vmcnt(1)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v12, 31, v4
+; GFX6-NEXT:    v_ashrrev_i32_e32 v13, 31, v2
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
+; GFX6-NEXT:    v_ashrrev_i32_e32 v15, 31, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v3, v12
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v12, vcc
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v1, v13
+; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v2, v13, vcc
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, v7, v14
+; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v8, v14, vcc
+; GFX6-NEXT:    v_add_i32_e32 v9, vcc, v5, v15
+; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, v6, v15, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v1, v3, v12
+; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GFX6-NEXT:    v_xor_b32_e32 v3, v2, v13
+; GFX6-NEXT:    v_xor_b32_e32 v2, v4, v13
+; GFX6-NEXT:    v_xor_b32_e32 v5, v8, v14
+; GFX6-NEXT:    v_xor_b32_e32 v4, v7, v14
+; GFX6-NEXT:    v_xor_b32_e32 v7, v6, v15
+; GFX6-NEXT:    v_xor_b32_e32 v6, v9, v15
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v1
+; GFX6-NEXT:    v_ffbh_u32_e32 v16, v2
+; GFX6-NEXT:    v_ffbh_u32_e32 v17, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v18, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v19, v5
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX6-NEXT:    v_cndmask_b32_e32 v20, v9, v8, vcc
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
+; GFX6-NEXT:    v_add_i32_e32 v9, vcc, 32, v16
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, v17, v9, vcc
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v7
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v18
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, v19, v17, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, v9, v8, vcc
+; GFX6-NEXT:    v_lshl_b64 v[8:9], v[2:3], v16
+; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT:    v_lshl_b64 v[2:3], v[4:5], v17
+; GFX6-NEXT:    v_sub_i32_e64 v17, s[0:1], s4, v17
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
+; GFX6-NEXT:    v_lshl_b64 v[4:5], v[6:7], v18
+; GFX6-NEXT:    v_sub_i32_e64 v18, s[2:3], s4, v18
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
+; GFX6-NEXT:    v_lshl_b64 v[6:7], v[0:1], v20
+; GFX6-NEXT:    v_sub_i32_e64 v19, s[4:5], s4, v20
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[4:5], 0, v[0:1]
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v7
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_bfe_u32 v20, v7, 8, 23
+; GFX6-NEXT:    v_mov_b32_e32 v21, 0xff
+; GFX6-NEXT:    v_and_b32_e32 v7, v21, v9
+; GFX6-NEXT:    v_mov_b32_e32 v6, v8
+; GFX6-NEXT:    v_bfe_u32 v22, v9, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v9, v21, v3
+; GFX6-NEXT:    v_mov_b32_e32 v8, v2
+; GFX6-NEXT:    v_bfe_u32 v23, v3, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v3, v21, v5
+; GFX6-NEXT:    v_mov_b32_e32 v2, v4
+; GFX6-NEXT:    v_bfe_u32 v4, v5, 8, 23
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, v19, s[4:5]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v5, 23, v5
+; GFX6-NEXT:    v_or_b32_e32 v5, v5, v20
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX6-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
+; GFX6-NEXT:    v_or_b32_e32 v16, v16, v22
+; GFX6-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_mov_b32 s4, 1
+; GFX6-NEXT:    s_mov_b32 s5, s7
+; GFX6-NEXT:    v_and_b32_e32 v12, 1, v12
+; GFX6-NEXT:    v_and_b32_e32 v13, 1, v13
+; GFX6-NEXT:    v_and_b32_e32 v14, 1, v14
+; GFX6-NEXT:    v_and_b32_e32 v15, 1, v15
+; GFX6-NEXT:    v_cndmask_b32_e64 v17, 0, v17, s[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e64 v18, 0, v18, s[2:3]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
+; GFX6-NEXT:    v_lshlrev_b32_e32 v18, 23, v18
+; GFX6-NEXT:    v_or_b32_e32 v17, v17, v23
+; GFX6-NEXT:    v_or_b32_e32 v4, v18, v4
+; GFX6-NEXT:    v_and_b32_e32 v18, 1, v5
+; GFX6-NEXT:    v_and_b32_e32 v19, 1, v16
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX6-NEXT:    v_and_b32_e32 v0, 1, v17
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[0:1]
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[8:9]
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[0:1]
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, 1, v18, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v19, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v18
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v16, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v17, v0
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v1
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, v2, -v2, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, -v5, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v0, -v0, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v4, -v4, vcc
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[10:11], s[8:11], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_sint_to_fp_v4i64_to_v4f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX8-NEXT:    s_movk_i32 s8, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v3, v2, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s0, v0
+; GFX8-NEXT:    v_addc_u32_e32 v9, vcc, v1, v2, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 16, v4
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; GFX8-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    s_mov_b32 s6, 1
+; GFX8-NEXT:    s_mov_b32 s7, s5
+; GFX8-NEXT:    v_mov_b32_e32 v12, 0xff
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v7
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v6, v10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v5
+; GFX8-NEXT:    v_addc_u32_e32 v7, vcc, v7, v10, vcc
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v11
+; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v5, v11, vcc
+; GFX8-NEXT:    v_add_u32_e32 v16, vcc, v2, v13
+; GFX8-NEXT:    v_xor_b32_e32 v2, v6, v10
+; GFX8-NEXT:    v_xor_b32_e32 v4, v4, v11
+; GFX8-NEXT:    v_addc_u32_e32 v17, vcc, v3, v13, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v3, v7, v10
+; GFX8-NEXT:    v_and_b32_e32 v14, 1, v10
+; GFX8-NEXT:    v_ffbh_u32_e32 v10, v2
+; GFX8-NEXT:    v_xor_b32_e32 v6, v16, v13
+; GFX8-NEXT:    v_ffbh_u32_e32 v16, v4
+; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v10
+; GFX8-NEXT:    v_add_u32_e32 v16, vcc, 32, v16
+; GFX8-NEXT:    v_and_b32_e32 v15, 1, v11
+; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v11
+; GFX8-NEXT:    v_ffbh_u32_e32 v11, v3
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e32 v18, v11, v10, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v7, v17, v13
+; GFX8-NEXT:    v_ffbh_u32_e32 v17, v5
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v16, v17, v16, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s8, v18
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT:    v_lshlrev_b64 v[10:11], v18, v[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v2, v10
+; GFX8-NEXT:    v_and_b32_e32 v3, 0xff, v11
+; GFX8-NEXT:    v_bfe_u32 v10, v11, 8, 23
+; GFX8-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
+; GFX8-NEXT:    v_or_b32_e32 v10, v17, v10
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3]
+; GFX8-NEXT:    v_and_b32_e32 v17, 1, v10
+; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v16, v[4:5]
+; GFX8-NEXT:    v_sub_u32_e64 v16, s[0:1], s8, v16
+; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v4, v2
+; GFX8-NEXT:    v_and_b32_e32 v5, v12, v3
+; GFX8-NEXT:    v_bfe_u32 v2, v3, 8, 23
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v16, s[0:1]
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v3
+; GFX8-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[4:5]
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v16, 1, v17, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v10, v16
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v3
+; GFX8-NEXT:    v_ffbh_u32_e32 v11, v6
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v4, -v4, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v18, v7
+; GFX8-NEXT:    v_add_u32_e64 v11, s[2:3], 32, v11
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, v18, v11, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v14, v[6:7]
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v10, v4
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s8, v14
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GFX8-NEXT:    v_and_b32_e32 v11, v12, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 23, v4
+; GFX8-NEXT:    v_bfe_u32 v5, v5, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v4, v4, v5
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
+; GFX8-NEXT:    v_and_b32_e32 v5, 1, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT:    v_and_b32_e32 v5, 1, v13
+; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v13
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v13
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v13, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v6, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 32, v6
+; GFX8-NEXT:    v_ffbh_u32_e32 v7, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, v7, v6, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v4, -v4, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s8, v14
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT:    v_lshlrev_b64 v[6:7], v14, v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX8-NEXT:    v_and_b32_e32 v11, v12, v7
+; GFX8-NEXT:    v_mov_b32_e32 v10, v6
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX8-NEXT:    v_bfe_u32 v1, v7, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
+; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, 1, v13
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v0, -v0, vcc
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -89,16 +846,461 @@ define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)*
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f16:
-; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
 define amdgpu_kernel void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
+; GFX6-LABEL: s_sint_to_fp_v2i64_to_v2f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_movk_i32 s16, 0xff
+; GFX6-NEXT:    s_movk_i32 s17, 0xbe
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
+; GFX6-NEXT:    s_mov_b32 s12, 1
+; GFX6-NEXT:    s_mov_b32 s13, s9
+; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_add_u32 s2, s2, s10
+; GFX6-NEXT:    s_addc_u32 s3, s3, s10
+; GFX6-NEXT:    s_ashr_i32 s14, s1, 31
+; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT:    s_flbit_i32_b32 s11, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s18, s3
+; GFX6-NEXT:    s_add_i32 s11, s11, 32
+; GFX6-NEXT:    s_and_b32 s10, 1, s10
+; GFX6-NEXT:    s_mov_b32 s15, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s18
+; GFX6-NEXT:    v_mov_b32_e32 v1, s11
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    s_add_u32 s0, s0, s14
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s17, v2
+; GFX6-NEXT:    s_addc_u32 s1, s1, s14
+; GFX6-NEXT:    s_and_b32 s11, 1, s14
+; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    s_xor_b64 s[0:1], s[0:1], s[14:15]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    s_flbit_i32_b32 s2, s0
+; GFX6-NEXT:    s_flbit_i32_b32 s3, s1
+; GFX6-NEXT:    v_or_b32_e32 v4, v0, v1
+; GFX6-NEXT:    s_add_i32 s2, s2, 32
+; GFX6-NEXT:    v_mov_b32_e32 v0, s3
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
+; GFX6-NEXT:    v_mov_b32_e32 v5, s2
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v5, v0, v5, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[0:1], v5
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s17, v5
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v6, vcc
+; GFX6-NEXT:    v_and_b32_e32 v3, s16, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v5, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s10, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v4, v4, -v4, s[0:1]
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v4
+; GFX6-NEXT:    v_and_b32_e32 v4, 1, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[12:13], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v4, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, 1
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[0:1]
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_sint_to_fp_v2i64_to_v2f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; GFX8-NEXT:    s_movk_i32 s12, 0xbe
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_movk_i32 s14, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_ashr_i32 s8, s7, 31
+; GFX8-NEXT:    s_add_u32 s6, s6, s8
+; GFX8-NEXT:    s_mov_b32 s9, s8
+; GFX8-NEXT:    s_addc_u32 s7, s7, s8
+; GFX8-NEXT:    s_xor_b64 s[6:7], s[6:7], s[8:9]
+; GFX8-NEXT:    s_flbit_i32_b32 s9, s6
+; GFX8-NEXT:    s_add_i32 s9, s9, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s10, s7
+; GFX8-NEXT:    s_cmp_eq_u32 s7, 0
+; GFX8-NEXT:    s_cselect_b32 s9, s9, s10
+; GFX8-NEXT:    s_sub_i32 s10, s12, s9
+; GFX8-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8-NEXT:    s_cselect_b32 s10, s10, 0
+; GFX8-NEXT:    s_lshl_b64 s[6:7], s[6:7], s9
+; GFX8-NEXT:    s_bfe_u32 s9, s7, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s10, s10, 23
+; GFX8-NEXT:    s_or_b32 s9, s10, s9
+; GFX8-NEXT:    s_mov_b32 s10, 1
+; GFX8-NEXT:    s_mov_b32 s11, s3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s10
+; GFX8-NEXT:    s_and_b32 s7, s7, s14
+; GFX8-NEXT:    v_mov_b32_e32 v1, s11
+; GFX8-NEXT:    s_and_b32 s13, s9, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[6:7], s[2:3]
+; GFX8-NEXT:    s_cselect_b32 s6, s13, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, 1
+; GFX8-NEXT:    s_add_i32 s6, s9, s6
+; GFX8-NEXT:    s_and_b32 s7, 1, s8
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[6:7], s7, 1
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, s[6:7]
+; GFX8-NEXT:    s_ashr_i32 s6, s5, 31
+; GFX8-NEXT:    s_add_u32 s4, s4, s6
+; GFX8-NEXT:    s_mov_b32 s7, s6
+; GFX8-NEXT:    s_addc_u32 s5, s5, s6
+; GFX8-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
+; GFX8-NEXT:    s_flbit_i32_b32 s7, s4
+; GFX8-NEXT:    s_add_i32 s7, s7, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s5
+; GFX8-NEXT:    s_cmp_eq_u32 s5, 0
+; GFX8-NEXT:    s_cselect_b32 s7, s7, s8
+; GFX8-NEXT:    s_sub_i32 s8, s12, s7
+; GFX8-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8-NEXT:    s_cselect_b32 s8, s8, 0
+; GFX8-NEXT:    s_lshl_b64 s[4:5], s[4:5], s7
+; GFX8-NEXT:    s_bfe_u32 s7, s5, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s8, s8, 23
+; GFX8-NEXT:    s_or_b32 s7, s8, s7
+; GFX8-NEXT:    s_and_b32 s5, s5, s14
+; GFX8-NEXT:    s_and_b32 s8, s7, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[4:5], s[2:3]
+; GFX8-NEXT:    s_cselect_b32 s2, s8, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s2, s7, s2
+; GFX8-NEXT:    s_and_b32 s3, 1, s6
+; GFX8-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], s3, 1
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, s[2:3]
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX8-NEXT:    v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = sitofp <2 x i64> %in to <2 x half>
   store <2 x half> %result, <2 x half> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f16:
 define amdgpu_kernel void @v_sint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_sint_to_fp_v4i64_to_v4f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v9, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[10:11]
+; GFX6-NEXT:    buffer_load_dwordx4 v[1:4], v[8:9], s[4:7], 0 addr64 offset:16
+; GFX6-NEXT:    buffer_load_dwordx4 v[5:8], v[8:9], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v10, 3, v0
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    v_mov_b32_e32 v11, v9
+; GFX6-NEXT:    s_waitcnt vmcnt(1)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v12, 31, v4
+; GFX6-NEXT:    v_ashrrev_i32_e32 v13, 31, v2
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
+; GFX6-NEXT:    v_ashrrev_i32_e32 v15, 31, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v3, v12
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v12, vcc
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v1, v13
+; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v2, v13, vcc
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, v7, v14
+; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v8, v14, vcc
+; GFX6-NEXT:    v_add_i32_e32 v9, vcc, v5, v15
+; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, v6, v15, vcc
+; GFX6-NEXT:    v_xor_b32_e32 v1, v3, v12
+; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GFX6-NEXT:    v_xor_b32_e32 v3, v2, v13
+; GFX6-NEXT:    v_xor_b32_e32 v2, v4, v13
+; GFX6-NEXT:    v_xor_b32_e32 v5, v8, v14
+; GFX6-NEXT:    v_xor_b32_e32 v4, v7, v14
+; GFX6-NEXT:    v_xor_b32_e32 v7, v6, v15
+; GFX6-NEXT:    v_xor_b32_e32 v6, v9, v15
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v1
+; GFX6-NEXT:    v_ffbh_u32_e32 v16, v2
+; GFX6-NEXT:    v_ffbh_u32_e32 v17, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v18, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v19, v5
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX6-NEXT:    v_cndmask_b32_e32 v20, v9, v8, vcc
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
+; GFX6-NEXT:    v_add_i32_e32 v9, vcc, 32, v16
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, v17, v9, vcc
+; GFX6-NEXT:    v_ffbh_u32_e32 v9, v7
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v18
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, v19, v17, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, v9, v8, vcc
+; GFX6-NEXT:    v_lshl_b64 v[8:9], v[2:3], v16
+; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT:    v_lshl_b64 v[2:3], v[4:5], v17
+; GFX6-NEXT:    v_sub_i32_e64 v17, s[0:1], s4, v17
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[4:5]
+; GFX6-NEXT:    v_lshl_b64 v[4:5], v[6:7], v18
+; GFX6-NEXT:    v_sub_i32_e64 v18, s[2:3], s4, v18
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
+; GFX6-NEXT:    v_lshl_b64 v[6:7], v[0:1], v20
+; GFX6-NEXT:    v_sub_i32_e64 v19, s[4:5], s4, v20
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[4:5], 0, v[0:1]
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v7
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_bfe_u32 v20, v7, 8, 23
+; GFX6-NEXT:    v_mov_b32_e32 v21, 0xff
+; GFX6-NEXT:    v_and_b32_e32 v7, v21, v9
+; GFX6-NEXT:    v_mov_b32_e32 v6, v8
+; GFX6-NEXT:    v_bfe_u32 v22, v9, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v9, v21, v3
+; GFX6-NEXT:    v_mov_b32_e32 v8, v2
+; GFX6-NEXT:    v_bfe_u32 v23, v3, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v3, v21, v5
+; GFX6-NEXT:    v_mov_b32_e32 v2, v4
+; GFX6-NEXT:    v_bfe_u32 v4, v5, 8, 23
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, v19, s[4:5]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v5, 23, v5
+; GFX6-NEXT:    v_or_b32_e32 v5, v5, v20
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX6-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
+; GFX6-NEXT:    v_or_b32_e32 v16, v16, v22
+; GFX6-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_mov_b32 s4, 1
+; GFX6-NEXT:    s_mov_b32 s5, s7
+; GFX6-NEXT:    v_and_b32_e32 v12, 1, v12
+; GFX6-NEXT:    v_and_b32_e32 v13, 1, v13
+; GFX6-NEXT:    v_and_b32_e32 v14, 1, v14
+; GFX6-NEXT:    v_and_b32_e32 v15, 1, v15
+; GFX6-NEXT:    v_cndmask_b32_e64 v17, 0, v17, s[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e64 v18, 0, v18, s[2:3]
+; GFX6-NEXT:    v_lshlrev_b32_e32 v17, 23, v17
+; GFX6-NEXT:    v_lshlrev_b32_e32 v18, 23, v18
+; GFX6-NEXT:    v_or_b32_e32 v17, v17, v23
+; GFX6-NEXT:    v_or_b32_e32 v4, v18, v4
+; GFX6-NEXT:    v_and_b32_e32 v18, 1, v5
+; GFX6-NEXT:    v_and_b32_e32 v19, 1, v16
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX6-NEXT:    v_and_b32_e32 v0, 1, v17
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v4
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[0:1]
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[8:9]
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[0:1]
+; GFX6-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v18, 1, v18, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 1, v19, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v18
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v16, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v17, v0
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, -v2, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, -v3, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v14
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v15
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -v1, vcc
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v4, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT:    v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[10:11], s[8:11], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_sint_to_fp_v4i64_to_v4f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
+; GFX8-NEXT:    v_mov_b32_e32 v11, 0
+; GFX8-NEXT:    s_movk_i32 s8, 0xbe
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s6, v1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s7
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v11, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
+; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
+; GFX8-NEXT:    v_mov_b32_e32 v12, 0xff
+; GFX8-NEXT:    v_lshlrev_b32_e32 v19, 3, v0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_mov_b32 s6, 1
+; GFX8-NEXT:    s_mov_b32 s7, s3
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v4
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v8
+; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v13
+; GFX8-NEXT:    v_ashrrev_i32_e32 v14, 31, v6
+; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, v8, v13, vcc
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v14
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v6, v14, vcc
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, v3, v15
+; GFX8-NEXT:    v_xor_b32_e32 v3, v7, v13
+; GFX8-NEXT:    v_xor_b32_e32 v7, v9, v15
+; GFX8-NEXT:    v_ffbh_u32_e32 v9, v3
+; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, v4, v15, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v4, v8, v13
+; GFX8-NEXT:    v_xor_b32_e32 v5, v5, v14
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 32, v9
+; GFX8-NEXT:    v_xor_b32_e32 v8, v10, v15
+; GFX8-NEXT:    v_ffbh_u32_e32 v10, v4
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX8-NEXT:    v_ffbh_u32_e32 v16, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v18, v10, v9, vcc
+; GFX8-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v16
+; GFX8-NEXT:    v_ffbh_u32_e32 v17, v6
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v10, v17, v10, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v16, vcc, s8, v10
+; GFX8-NEXT:    v_ffbh_u32_e32 v9, v7
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[5:6]
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v10, v[5:6]
+; GFX8-NEXT:    v_add_u32_e64 v9, s[0:1], 32, v9
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v16, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v10, v8
+; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v8
+; GFX8-NEXT:    v_cndmask_b32_e64 v17, v10, v9, s[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v9, v5
+; GFX8-NEXT:    v_and_b32_e32 v10, v12, v6
+; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[9:10]
+; GFX8-NEXT:    v_and_b32_e32 v5, 1, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v16, 0, v5, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[9:10]
+; GFX8-NEXT:    v_sub_u32_e64 v9, s[0:1], s8, v18
+; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[3:4]
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v18, v[3:4]
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v3, v5
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xff, v6
+; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
+; GFX8-NEXT:    v_lshlrev_b32_e32 v9, 23, v9
+; GFX8-NEXT:    v_or_b32_e32 v5, v9, v5
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[3:4]
+; GFX8-NEXT:    v_and_b32_e32 v9, 1, v5
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, 0, v9, s[0:1]
+; GFX8-NEXT:    v_cmp_gt_u64_e64 s[0:1], s[6:7], v[3:4]
+; GFX8-NEXT:    v_and_b32_e32 v6, 1, v13
+; GFX8-NEXT:    v_and_b32_e32 v13, 1, v14
+; GFX8-NEXT:    v_cndmask_b32_e32 v14, 1, v16, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v9, 1, v9, s[0:1]
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v9
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v14
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v6
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v5, -v5, vcc
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v13
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v13, v0
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s8, v17
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v17, v[7:8]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v5, v3
+; GFX8-NEXT:    v_and_b32_e32 v6, v12, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX8-NEXT:    v_bfe_u32 v3, v4, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[5:6]
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[5:6]
+; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v3
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v15
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v3
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v0, -v0, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v1, v6
+; GFX8-NEXT:    v_xor_b32_e32 v0, v0, v6
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v2, v6, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v2, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, v1, v6
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 32, v2
+; GFX8-NEXT:    v_ffbh_u32_e32 v3, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, v3, v2, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v7, v[0:1]
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v4, v2
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, s8, v7
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v5, v12, v3
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX8-NEXT:    v_bfe_u32 v1, v3, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[4:5]
+; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v10, s5
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    v_and_b32_e32 v1, 1, v6
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -v0, vcc
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v19
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v10, v11, vcc
+; GFX8-NEXT:    v_or_b32_e32 v2, v13, v9
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v8
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid

diff  --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
index aa1971a3fdbe..45ebeceb1f14 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
@@ -1,30 +1,170 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX6 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX8 %s
 
 ; FIXME: This should be merged with uint_to_fp.ll, but s_uint_to_fp_v2i64 crashes on r600
 
-; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f16:
 define amdgpu_kernel void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+; GFX6-LABEL: s_uint_to_fp_i64_to_f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_flbit_i32_b32 s4, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s10, s3
+; GFX6-NEXT:    s_add_i32 s11, s4, 32
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_mov_b32 s4, s0
+; GFX6-NEXT:    s_mov_b32 s5, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s10
+; GFX6-NEXT:    v_mov_b32_e32 v1, s11
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_uint_to_fp_i64_to_f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    v_mov_b32_e32 v0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_flbit_i32_b32 s6, s2
+; GFX8-NEXT:    s_add_i32 s6, s6, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s7, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, s7
+; GFX8-NEXT:    s_sub_i32 s7, 0xbe, s6
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s7, s7, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
+; GFX8-NEXT:    s_lshl_b32 s6, s7, 23
+; GFX8-NEXT:    s_bfe_u32 s7, s3, 0x170008
+; GFX8-NEXT:    s_or_b32 s6, s6, s7
+; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    s_and_b32 s7, s6, 1
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
+; GFX8-NEXT:    s_cselect_b32 s2, s7, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s6, s6, s2
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, s6
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = uitofp i64 %in to half
   store half %result, half addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_uint_to_fp_i64_to_f16:
-; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_add_{{[iu]}}32_e32 [[VR:v[0-9]+]]
-; GCN: v_cvt_f16_f32_e32 [[VR_F16:v[0-9]+]], [[VR]]
-; GCN: {{buffer|flat}}_store_short {{.*}}[[VR_F16]]
 define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_uint_to_fp_i64_to_f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    s_mov_b32 s9, s7
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
+; GFX6-NEXT:    v_mov_b32_e32 v7, v5
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    buffer_store_short v0, v[1:2], s[0:3], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_uint_to_fp_i64_to_f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX8-NEXT:    v_mov_b32_e32 v5, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 1, v0
+; GFX8-NEXT:    s_movk_i32 s6, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_mov_b32 s4, 1
+; GFX8-NEXT:    s_mov_b32 s5, s3
+; GFX8-NEXT:    v_mov_b32_e32 v7, s1
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ffbh_u32_e32 v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GFX8-NEXT:    v_ffbh_u32_e32 v3, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v0, v[1:2]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s6, v0
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v0, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
+; GFX8-NEXT:    v_bfe_u32 v3, v4, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v3, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v2, v0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v6
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v7, v5, vcc
+; GFX8-NEXT:    flat_store_short v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
@@ -34,27 +174,162 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f32:
 define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
+; GFX6-LABEL: s_uint_to_fp_i64_to_f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_flbit_i32_b32 s4, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s10, s3
+; GFX6-NEXT:    s_add_i32 s11, s4, 32
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_mov_b32 s4, s0
+; GFX6-NEXT:    s_mov_b32 s5, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s10
+; GFX6-NEXT:    v_mov_b32_e32 v1, s11
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 0xbe, v2
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; GFX6-NEXT:    v_mov_b32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_uint_to_fp_i64_to_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_mov_b32_e32 v2, 1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    s_flbit_i32_b32 s0, s2
+; GFX8-NEXT:    s_add_i32 s5, s0, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s4, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s4, s5, s4
+; GFX8-NEXT:    s_sub_i32 s5, 0xbe, s4
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s5, s5, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    s_mov_b32 s0, 0
+; GFX8-NEXT:    s_movk_i32 s1, 0x80
+; GFX8-NEXT:    s_bfe_u32 s4, s3, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s5, s5, 23
+; GFX8-NEXT:    s_or_b32 s4, s5, s4
+; GFX8-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX8-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NEXT:    s_and_b32 s5, s4, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[0:1]
+; GFX8-NEXT:    s_cselect_b32 s0, s5, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
+; GFX8-NEXT:    s_add_i32 s0, s4, s0
+; GFX8-NEXT:    v_mov_b32_e32 v2, s0
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = uitofp i64 %in to float
   store float %result, float addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_uint_to_fp_i64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_add_{{[iu]}}32_e32 [[VR:v[0-9]+]]
-; GCN: {{buffer|flat}}_store_dword {{.*}}[[VR]]
 define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_uint_to_fp_i64_to_f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; GFX6-NEXT:    s_movk_i32 s4, 0xbe
+; GFX6-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; GFX6-NEXT:    s_movk_i32 s7, 0x80
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ffbh_u32_e32 v0, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v5, v4
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 32, v0
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[5:6], v[3:4], v0
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, 0xff, v6
+; GFX6-NEXT:    v_mov_b32_e32 v7, v5
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v6, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    s_mov_b32 s6, 1
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_uint_to_fp_i64_to_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; GFX8-NEXT:    v_mov_b32_e32 v4, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX8-NEXT:    s_movk_i32 s4, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, s3
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[1:2], v[1:2]
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ffbh_u32_e32 v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
+; GFX8-NEXT:    v_ffbh_u32_e32 v5, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v0, v[1:2]
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
+; GFX8-NEXT:    v_and_b32_e32 v1, 0xff, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v0, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
+; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v2, v2, v5
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_and_b32_e32 v5, 1, v2
+; GFX8-NEXT:    s_mov_b32 s2, 1
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; GFX8-NEXT:    flat_store_dword v[3:4], v0
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
@@ -64,15 +339,342 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f32:
 define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+; GFX6-LABEL: s_uint_to_fp_v2i64_to_v2f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_movk_i32 s12, 0xff
+; GFX6-NEXT:    s_movk_i32 s13, 0xbe
+; GFX6-NEXT:    s_mov_b32 s10, 1
+; GFX6-NEXT:    s_mov_b32 s11, s9
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_flbit_i32_b32 s14, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s15, s3
+; GFX6-NEXT:    s_flbit_i32_b32 s16, s0
+; GFX6-NEXT:    s_flbit_i32_b32 s17, s1
+; GFX6-NEXT:    s_add_i32 s14, s14, 32
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    s_add_i32 s15, s16, 32
+; GFX6-NEXT:    v_mov_b32_e32 v1, s17
+; GFX6-NEXT:    v_mov_b32_e32 v2, s14
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s13, v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[0:1], v4
+; GFX6-NEXT:    v_sub_i32_e32 v7, vcc, s13, v4
+; GFX6-NEXT:    v_and_b32_e32 v5, s12, v1
+; GFX6-NEXT:    v_mov_b32_e32 v4, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX6-NEXT:    v_bfe_u32 v8, v1, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v1, s12, v3
+; GFX6-NEXT:    v_mov_b32_e32 v0, v2
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v7, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v3, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
+; GFX6-NEXT:    v_or_b32_e32 v6, v6, v8
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v6
+; GFX6-NEXT:    v_and_b32_e32 v7, 1, v2
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v7, vcc
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_uint_to_fp_v2i64_to_v2f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX8-NEXT:    s_movk_i32 s10, 0xbe
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    s_movk_i32 s13, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s2
+; GFX8-NEXT:    s_add_i32 s8, s8, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s9, s3
+; GFX8-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX8-NEXT:    s_cselect_b32 s8, s8, s9
+; GFX8-NEXT:    s_sub_i32 s9, s10, s8
+; GFX8-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GFX8-NEXT:    s_cselect_b32 s9, s9, 0
+; GFX8-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
+; GFX8-NEXT:    s_bfe_u32 s8, s3, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s9, s9, 23
+; GFX8-NEXT:    s_or_b32 s11, s9, s8
+; GFX8-NEXT:    s_mov_b32 s8, 1
+; GFX8-NEXT:    s_mov_b32 s9, s5
+; GFX8-NEXT:    v_mov_b32_e32 v0, s8
+; GFX8-NEXT:    s_and_b32 s3, s3, s13
+; GFX8-NEXT:    v_mov_b32_e32 v1, s9
+; GFX8-NEXT:    s_and_b32 s12, s11, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[2:3], s[4:5]
+; GFX8-NEXT:    s_cselect_b32 s2, s12, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    s_add_i32 s11, s11, s2
+; GFX8-NEXT:    s_flbit_i32_b32 s2, s0
+; GFX8-NEXT:    s_add_i32 s2, s2, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s3, s1
+; GFX8-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, s3
+; GFX8-NEXT:    s_sub_i32 s3, s10, s2
+; GFX8-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX8-NEXT:    s_cselect_b32 s3, s3, 0
+; GFX8-NEXT:    s_lshl_b64 s[0:1], s[0:1], s2
+; GFX8-NEXT:    s_bfe_u32 s2, s1, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s3, s3, 23
+; GFX8-NEXT:    s_or_b32 s2, s3, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, s13
+; GFX8-NEXT:    s_and_b32 s3, s2, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[0:1], s[4:5]
+; GFX8-NEXT:    s_cselect_b32 s0, s3, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s0, s0, 1
+; GFX8-NEXT:    s_add_i32 s2, s2, s0
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_mov_b32_e32 v0, s2
+; GFX8-NEXT:    v_mov_b32_e32 v1, s11
+; GFX8-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT:    s_endpgm
   %result = uitofp <2 x i64> %in to <2 x float>
   store <2 x float> %result, <2 x float> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f32:
 define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_uint_to_fp_v4i64_to_v4f32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s11, 0xf000
+; GFX6-NEXT:    s_mov_b32 s10, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v7, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v8, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v9, 4, v0
+; GFX6-NEXT:    v_mov_b32_e32 v15, 0xff
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX6-NEXT:    buffer_load_dwordx4 v[0:3], v[7:8], s[8:11], 0 addr64
+; GFX6-NEXT:    buffer_load_dwordx4 v[4:7], v[7:8], s[8:11], 0 addr64 offset:16
+; GFX6-NEXT:    s_movk_i32 s2, 0xbe
+; GFX6-NEXT:    s_mov_b64 s[6:7], s[10:11]
+; GFX6-NEXT:    s_movk_i32 s11, 0x80
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_mov_b32_e32 v10, v8
+; GFX6-NEXT:    s_mov_b32 s9, s11
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
+; GFX6-NEXT:    v_ffbh_u32_e32 v11, v7
+; GFX6-NEXT:    v_ffbh_u32_e32 v12, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v13, v5
+; GFX6-NEXT:    v_ffbh_u32_e32 v14, v2
+; GFX6-NEXT:    v_ffbh_u32_e32 v16, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v17, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v18, v1
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_add_i32_e32 v12, vcc, 32, v12
+; GFX6-NEXT:    v_add_i32_e32 v14, vcc, 32, v14
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v17
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e32 v8, v11, v8, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v19, v13, v12, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, v16, v14, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, v18, v17, vcc
+; GFX6-NEXT:    v_lshl_b64 v[11:12], v[6:7], v8
+; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
+; GFX6-NEXT:    v_lshl_b64 v[13:14], v[2:3], v16
+; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s2, v16
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT:    v_sub_i32_e64 v18, s[0:1], s2, v17
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[0:1]
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v17
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v12
+; GFX6-NEXT:    v_mov_b32_e32 v2, v11
+; GFX6-NEXT:    v_bfe_u32 v17, v12, 8, 23
+; GFX6-NEXT:    v_lshl_b64 v[11:12], v[4:5], v19
+; GFX6-NEXT:    v_sub_i32_e64 v19, s[2:3], s2, v19
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s[2:3]
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[4:5]
+; GFX6-NEXT:    v_and_b32_e32 v5, v15, v12
+; GFX6-NEXT:    v_mov_b32_e32 v4, v11
+; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[2:3]
+; GFX6-NEXT:    v_bfe_u32 v20, v12, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v7, v15, v14
+; GFX6-NEXT:    v_mov_b32_e32 v6, v13
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc
+; GFX6-NEXT:    v_bfe_u32 v14, v14, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v12, v15, v1
+; GFX6-NEXT:    v_mov_b32_e32 v11, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v18, s[0:1]
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 23, v8
+; GFX6-NEXT:    v_lshlrev_b32_e32 v15, 23, v19
+; GFX6-NEXT:    v_lshlrev_b32_e32 v13, 23, v13
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v8, v8, v17
+; GFX6-NEXT:    v_or_b32_e32 v15, v15, v20
+; GFX6-NEXT:    v_or_b32_e32 v13, v13, v14
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v8
+; GFX6-NEXT:    v_and_b32_e32 v14, 1, v15
+; GFX6-NEXT:    v_and_b32_e32 v16, 1, v13
+; GFX6-NEXT:    v_and_b32_e32 v17, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[11:12]
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v14, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 1, v16, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[11:12]
+; GFX6-NEXT:    v_cndmask_b32_e32 v5, 1, v17, vcc
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v8, v1
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v15, v2
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v13, v4
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v5
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], v[9:10], s[4:7], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_uint_to_fp_v4i64_to_v4f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX8-NEXT:    s_movk_i32 s6, 0xbe
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v3, v2, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_add_u32_e32 v8, vcc, s0, v0
+; GFX8-NEXT:    v_addc_u32_e32 v9, vcc, v1, v2, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, 16, v4
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; GFX8-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; GFX8-NEXT:    s_mov_b32 s4, 0
+; GFX8-NEXT:    s_movk_i32 s5, 0x80
+; GFX8-NEXT:    v_mov_b32_e32 v14, 0xff
+; GFX8-NEXT:    s_mov_b32 s2, 1
+; GFX8-NEXT:    s_mov_b32 s3, s5
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_ffbh_u32_e32 v15, v2
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ffbh_u32_e32 v10, v6
+; GFX8-NEXT:    v_ffbh_u32_e32 v12, v4
+; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 32, v10
+; GFX8-NEXT:    v_add_u32_e32 v12, vcc, 32, v12
+; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 32, v15
+; GFX8-NEXT:    v_ffbh_u32_e32 v11, v7
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX8-NEXT:    v_cndmask_b32_e32 v17, v11, v10, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v13, v5
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX8-NEXT:    v_cndmask_b32_e32 v18, v13, v12, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v16, v3
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX8-NEXT:    v_lshlrev_b64 v[10:11], v17, v[6:7]
+; GFX8-NEXT:    v_sub_u32_e32 v16, vcc, s6, v17
+; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s6, v18
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
+; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT:    v_lshlrev_b64 v[12:13], v18, v[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v16, 0, v16, s[0:1]
+; GFX8-NEXT:    v_lshlrev_b64 v[4:5], v15, v[2:3]
+; GFX8-NEXT:    v_sub_u32_e64 v15, s[0:1], s6, v15
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v15, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v6, v10
+; GFX8-NEXT:    v_and_b32_e32 v7, 0xff, v11
+; GFX8-NEXT:    v_bfe_u32 v18, v11, 8, 23
+; GFX8-NEXT:    v_lshlrev_b32_e32 v16, 23, v16
+; GFX8-NEXT:    v_mov_b32_e32 v10, v12
+; GFX8-NEXT:    v_mov_b32_e32 v12, v4
+; GFX8-NEXT:    v_or_b32_e32 v16, v16, v18
+; GFX8-NEXT:    v_bfe_u32 v4, v5, 8, 23
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[6:7]
+; GFX8-NEXT:    v_or_b32_e32 v4, v2, v4
+; GFX8-NEXT:    v_and_b32_e32 v2, 1, v16
+; GFX8-NEXT:    v_and_b32_e32 v11, v14, v13
+; GFX8-NEXT:    v_bfe_u32 v19, v13, 8, 23
+; GFX8-NEXT:    v_and_b32_e32 v13, v14, v5
+; GFX8-NEXT:    v_ffbh_u32_e32 v5, v0
+; GFX8-NEXT:    v_add_u32_e64 v5, s[0:1], 32, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v17
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[6:7]
+; GFX8-NEXT:    v_or_b32_e32 v15, v3, v19
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[10:11]
+; GFX8-NEXT:    v_and_b32_e32 v3, 1, v15
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 1, v2, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[0:1]
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[4:5], v[12:13]
+; GFX8-NEXT:    v_and_b32_e32 v6, 1, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, 1, v3, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, v6, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v10, 1, v6, vcc
+; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v16, v2
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v15, v7
+; GFX8-NEXT:    v_ffbh_u32_e32 v18, v1
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v12, v18, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v4, v10
+; GFX8-NEXT:    v_sub_u32_e32 v4, vcc, s6, v12
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT:    v_lshlrev_b64 v[6:7], v12, v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX8-NEXT:    v_and_b32_e32 v11, v14, v7
+; GFX8-NEXT:    v_mov_b32_e32 v10, v6
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX8-NEXT:    v_bfe_u32 v1, v7, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[4:5], v[10:11]
+; GFX8-NEXT:    v_and_b32_e32 v1, 1, v0
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[10:11]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v0, v1
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -82,15 +684,363 @@ define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)*
   ret void
 }
 
-; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f16:
 define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0{
+; GFX6-LABEL: s_uint_to_fp_v2i64_to_v2f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 s7, 0xf000
+; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s8, 0
+; GFX6-NEXT:    s_movk_i32 s9, 0x80
+; GFX6-NEXT:    s_movk_i32 s12, 0xff
+; GFX6-NEXT:    s_movk_i32 s13, 0xbe
+; GFX6-NEXT:    s_mov_b32 s10, 1
+; GFX6-NEXT:    s_mov_b32 s11, s9
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_flbit_i32_b32 s14, s2
+; GFX6-NEXT:    s_flbit_i32_b32 s15, s3
+; GFX6-NEXT:    s_flbit_i32_b32 s16, s0
+; GFX6-NEXT:    s_flbit_i32_b32 s17, s1
+; GFX6-NEXT:    s_add_i32 s14, s14, 32
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    s_add_i32 s15, s16, 32
+; GFX6-NEXT:    v_mov_b32_e32 v1, s17
+; GFX6-NEXT:    v_mov_b32_e32 v2, s14
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, v1, v0, vcc
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[2:3], v2
+; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s13, v2
+; GFX6-NEXT:    v_lshl_b64 v[2:3], s[0:1], v4
+; GFX6-NEXT:    v_sub_i32_e32 v7, vcc, s13, v4
+; GFX6-NEXT:    v_and_b32_e32 v5, s12, v1
+; GFX6-NEXT:    v_mov_b32_e32 v4, v0
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX6-NEXT:    v_bfe_u32 v8, v1, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v1, s12, v3
+; GFX6-NEXT:    v_mov_b32_e32 v0, v2
+; GFX6-NEXT:    v_cmp_ne_u64_e64 vcc, s[0:1], 0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v7, vcc
+; GFX6-NEXT:    v_bfe_u32 v3, v3, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 23, v2
+; GFX6-NEXT:    v_or_b32_e32 v6, v6, v8
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, 1, v6
+; GFX6-NEXT:    v_and_b32_e32 v7, 1, v2
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e32 v0, 1, v7, vcc
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v6
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: s_uint_to_fp_v2i64_to_v2f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; GFX8-NEXT:    s_movk_i32 s10, 0xbe
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_movk_i32 s13, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_flbit_i32_b32 s8, s6
+; GFX8-NEXT:    s_add_i32 s8, s8, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s9, s7
+; GFX8-NEXT:    s_cmp_eq_u32 s7, 0
+; GFX8-NEXT:    s_cselect_b32 s8, s8, s9
+; GFX8-NEXT:    s_sub_i32 s9, s10, s8
+; GFX8-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX8-NEXT:    s_cselect_b32 s9, s9, 0
+; GFX8-NEXT:    s_lshl_b64 s[6:7], s[6:7], s8
+; GFX8-NEXT:    s_bfe_u32 s8, s7, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s9, s9, 23
+; GFX8-NEXT:    s_or_b32 s11, s9, s8
+; GFX8-NEXT:    s_mov_b32 s8, 1
+; GFX8-NEXT:    s_mov_b32 s9, s3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s8
+; GFX8-NEXT:    s_and_b32 s7, s7, s13
+; GFX8-NEXT:    v_mov_b32_e32 v1, s9
+; GFX8-NEXT:    s_and_b32 s12, s11, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[6:7], s[2:3]
+; GFX8-NEXT:    s_cselect_b32 s6, s12, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, 1
+; GFX8-NEXT:    s_add_i32 s11, s11, s6
+; GFX8-NEXT:    s_flbit_i32_b32 s6, s4
+; GFX8-NEXT:    s_add_i32 s6, s6, 32
+; GFX8-NEXT:    s_flbit_i32_b32 s7, s5
+; GFX8-NEXT:    s_cmp_eq_u32 s5, 0
+; GFX8-NEXT:    s_cselect_b32 s6, s6, s7
+; GFX8-NEXT:    s_sub_i32 s7, s10, s6
+; GFX8-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX8-NEXT:    s_cselect_b32 s7, s7, 0
+; GFX8-NEXT:    s_lshl_b64 s[4:5], s[4:5], s6
+; GFX8-NEXT:    s_bfe_u32 s6, s5, 0x170008
+; GFX8-NEXT:    s_lshl_b32 s7, s7, 23
+; GFX8-NEXT:    s_or_b32 s6, s7, s6
+; GFX8-NEXT:    s_and_b32 s5, s5, s13
+; GFX8-NEXT:    s_and_b32 s7, s6, 1
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT:    s_cmp_eq_u64 s[4:5], s[2:3]
+; GFX8-NEXT:    s_cselect_b32 s2, s7, 0
+; GFX8-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX8-NEXT:    s_cselect_b32 s2, s2, 1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s11
+; GFX8-NEXT:    s_add_i32 s6, s6, s2
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, s6
+; GFX8-NEXT:    v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    flat_store_dword v[0:1], v2
+; GFX8-NEXT:    s_endpgm
   %result = uitofp <2 x i64> %in to <2 x half>
   store <2 x half> %result, <2 x half> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f16:
 define amdgpu_kernel void @v_uint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; GFX6-LABEL: v_uint_to_fp_v4i64_to_v4f16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s11, 0xf000
+; GFX6-NEXT:    s_mov_b32 s10, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v7, 5, v0
+; GFX6-NEXT:    v_mov_b32_e32 v8, 0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v9, 3, v0
+; GFX6-NEXT:    v_mov_b32_e32 v15, 0xff
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX6-NEXT:    buffer_load_dwordx4 v[0:3], v[7:8], s[8:11], 0 addr64
+; GFX6-NEXT:    buffer_load_dwordx4 v[4:7], v[7:8], s[8:11], 0 addr64 offset:16
+; GFX6-NEXT:    s_movk_i32 s2, 0xbe
+; GFX6-NEXT:    s_mov_b64 s[6:7], s[10:11]
+; GFX6-NEXT:    s_movk_i32 s11, 0x80
+; GFX6-NEXT:    s_mov_b32 s8, 1
+; GFX6-NEXT:    v_mov_b32_e32 v10, v8
+; GFX6-NEXT:    s_mov_b32 s9, s11
+; GFX6-NEXT:    s_waitcnt vmcnt(0)
+; GFX6-NEXT:    v_ffbh_u32_e32 v8, v6
+; GFX6-NEXT:    v_ffbh_u32_e32 v11, v7
+; GFX6-NEXT:    v_ffbh_u32_e32 v12, v4
+; GFX6-NEXT:    v_ffbh_u32_e32 v13, v5
+; GFX6-NEXT:    v_ffbh_u32_e32 v14, v2
+; GFX6-NEXT:    v_ffbh_u32_e32 v16, v3
+; GFX6-NEXT:    v_ffbh_u32_e32 v17, v0
+; GFX6-NEXT:    v_ffbh_u32_e32 v18, v1
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GFX6-NEXT:    v_add_i32_e32 v12, vcc, 32, v12
+; GFX6-NEXT:    v_add_i32_e32 v14, vcc, 32, v14
+; GFX6-NEXT:    v_add_i32_e32 v17, vcc, 32, v17
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
+; GFX6-NEXT:    v_cndmask_b32_e32 v8, v11, v8, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v19, v13, v12, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, v16, v14, vcc
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, v18, v17, vcc
+; GFX6-NEXT:    v_lshl_b64 v[11:12], v[6:7], v8
+; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
+; GFX6-NEXT:    v_lshl_b64 v[13:14], v[2:3], v16
+; GFX6-NEXT:    v_sub_i32_e32 v16, vcc, s2, v16
+; GFX6-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT:    v_sub_i32_e64 v18, s[0:1], s2, v17
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[0:1]
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v17
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xff, v12
+; GFX6-NEXT:    v_mov_b32_e32 v2, v11
+; GFX6-NEXT:    v_bfe_u32 v17, v12, 8, 23
+; GFX6-NEXT:    v_lshl_b64 v[11:12], v[4:5], v19
+; GFX6-NEXT:    v_sub_i32_e64 v19, s[2:3], s2, v19
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, v8, s[2:3]
+; GFX6-NEXT:    v_cmp_ne_u64_e64 s[2:3], 0, v[4:5]
+; GFX6-NEXT:    v_and_b32_e32 v5, v15, v12
+; GFX6-NEXT:    v_mov_b32_e32 v4, v11
+; GFX6-NEXT:    v_cndmask_b32_e64 v19, 0, v19, s[2:3]
+; GFX6-NEXT:    v_bfe_u32 v20, v12, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v7, v15, v14
+; GFX6-NEXT:    v_mov_b32_e32 v6, v13
+; GFX6-NEXT:    v_cndmask_b32_e32 v13, 0, v16, vcc
+; GFX6-NEXT:    v_bfe_u32 v14, v14, 8, 23
+; GFX6-NEXT:    v_and_b32_e32 v12, v15, v1
+; GFX6-NEXT:    v_mov_b32_e32 v11, v0
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, 0, v18, s[0:1]
+; GFX6-NEXT:    v_bfe_u32 v1, v1, 8, 23
+; GFX6-NEXT:    v_lshlrev_b32_e32 v8, 23, v8
+; GFX6-NEXT:    v_lshlrev_b32_e32 v15, 23, v19
+; GFX6-NEXT:    v_lshlrev_b32_e32 v13, 23, v13
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 23, v0
+; GFX6-NEXT:    v_or_b32_e32 v8, v8, v17
+; GFX6-NEXT:    v_or_b32_e32 v15, v15, v20
+; GFX6-NEXT:    v_or_b32_e32 v13, v13, v14
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 1, v8
+; GFX6-NEXT:    v_and_b32_e32 v14, 1, v15
+; GFX6-NEXT:    v_and_b32_e32 v16, 1, v13
+; GFX6-NEXT:    v_and_b32_e32 v17, 1, v0
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[11:12]
+; GFX6-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT:    v_cndmask_b32_e32 v1, 1, v1, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[4:5]
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, 1, v14, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[6:7]
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, 1, v16, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[11:12]
+; GFX6-NEXT:    v_cndmask_b32_e32 v4, 1, v17, vcc
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v15, v2
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v13, v3
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], v[9:10], s[4:7], 0 addr64
+; GFX6-NEXT:    s_endpgm
+;
+; GFX8-LABEL: v_uint_to_fp_v4i64_to_v4f16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 5, v0
+; GFX8-NEXT:    v_mov_b32_e32 v13, 0
+; GFX8-NEXT:    s_movk_i32 s8, 0xbe
+; GFX8-NEXT:    v_mov_b32_e32 v14, 0xff
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s6, v1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s7
+; GFX8-NEXT:    v_addc_u32_e32 v6, vcc, v2, v13, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 16, v5
+; GFX8-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GFX8-NEXT:    flat_load_dwordx4 v[1:4], v[1:2]
+; GFX8-NEXT:    flat_load_dwordx4 v[5:8], v[5:6]
+; GFX8-NEXT:    s_mov_b32 s2, 0
+; GFX8-NEXT:    s_movk_i32 s3, 0x80
+; GFX8-NEXT:    s_mov_b32 s6, 1
+; GFX8-NEXT:    s_mov_b32 s7, s3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX8-NEXT:    s_waitcnt vmcnt(1)
+; GFX8-NEXT:    v_ffbh_u32_e32 v15, v3
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    v_ffbh_u32_e32 v9, v7
+; GFX8-NEXT:    v_ffbh_u32_e32 v11, v5
+; GFX8-NEXT:    v_ffbh_u32_e32 v17, v1
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 32, v9
+; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 32, v11
+; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 32, v15
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 32, v17
+; GFX8-NEXT:    v_ffbh_u32_e32 v10, v8
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX8-NEXT:    v_cndmask_b32_e32 v19, v10, v9, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v12, v6
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
+; GFX8-NEXT:    v_cndmask_b32_e32 v20, v12, v11, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v16, v4
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX8-NEXT:    v_ffbh_u32_e32 v18, v2
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX8-NEXT:    v_cndmask_b32_e32 v16, v18, v17, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v17, vcc, s8, v19
+; GFX8-NEXT:    v_sub_u32_e32 v18, vcc, s8, v20
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[5:6]
+; GFX8-NEXT:    v_lshlrev_b64 v[11:12], v20, v[5:6]
+; GFX8-NEXT:    v_lshlrev_b64 v[9:10], v19, v[7:8]
+; GFX8-NEXT:    v_cmp_ne_u64_e64 s[0:1], 0, v[7:8]
+; GFX8-NEXT:    v_lshlrev_b64 v[5:6], v15, v[3:4]
+; GFX8-NEXT:    v_cndmask_b32_e32 v18, 0, v18, vcc
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[3:4]
+; GFX8-NEXT:    v_mov_b32_e32 v7, v9
+; GFX8-NEXT:    v_mov_b32_e32 v9, v11
+; GFX8-NEXT:    v_mov_b32_e32 v11, v5
+; GFX8-NEXT:    v_and_b32_e32 v8, 0xff, v10
+; GFX8-NEXT:    v_bfe_u32 v19, v10, 8, 23
+; GFX8-NEXT:    v_and_b32_e32 v10, v14, v12
+; GFX8-NEXT:    v_bfe_u32 v20, v12, 8, 23
+; GFX8-NEXT:    v_and_b32_e32 v12, v14, v6
+; GFX8-NEXT:    v_bfe_u32 v5, v6, 8, 23
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, 0, v17, s[0:1]
+; GFX8-NEXT:    v_sub_u32_e64 v15, s[0:1], s8, v15
+; GFX8-NEXT:    v_cndmask_b32_e32 v3, 0, v15, vcc
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 23, v6
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 23, v3
+; GFX8-NEXT:    v_or_b32_e32 v6, v6, v19
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[7:8]
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v5
+; GFX8-NEXT:    v_and_b32_e32 v5, 1, v6
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 23, v18
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[7:8]
+; GFX8-NEXT:    v_or_b32_e32 v4, v4, v20
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[9:10]
+; GFX8-NEXT:    v_and_b32_e32 v15, 1, v4
+; GFX8-NEXT:    v_cndmask_b32_e32 v5, 1, v5, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[9:10]
+; GFX8-NEXT:    v_cndmask_b32_e64 v8, 0, v15, s[0:1]
+; GFX8-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], v[11:12]
+; GFX8-NEXT:    v_and_b32_e32 v7, 1, v3
+; GFX8-NEXT:    v_cndmask_b32_e32 v8, 1, v8, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[11:12]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, 0, v7, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e32 v7, 1, v7, vcc
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v8
+; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v6, v5
+; GFX8-NEXT:    v_add_u32_e32 v6, vcc, v3, v7
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v8, v4
+; GFX8-NEXT:    v_lshlrev_b64 v[3:4], v16, v[1:2]
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v7, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v5, v3
+; GFX8-NEXT:    v_sub_u32_e32 v3, vcc, s8, v16
+; GFX8-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[1:2]
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v9, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v3, vcc
+; GFX8-NEXT:    v_and_b32_e32 v6, v14, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 23, v1
+; GFX8-NEXT:    v_bfe_u32 v2, v4, 8, 23
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, s[2:3], v[5:6]
+; GFX8-NEXT:    v_and_b32_e32 v2, 1, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[5:6]
+; GFX8-NEXT:    v_mov_b32_e32 v17, s5
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 1, v2, vcc
+; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT:    v_cvt_f16_f32_e32 v3, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s4, v0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v17, v13, vcc
+; GFX8-NEXT:    v_or_b32_e32 v2, v8, v7
+; GFX8-NEXT:    v_or_b32_e32 v3, v3, v9
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid


        

