[llvm] 0fdce16 - [AMDGPU] Regenerate fp2int tests. NFCI.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu May 6 03:55:27 PDT 2021


Author: Simon Pilgrim
Date: 2021-05-06T11:55:14+01:00
New Revision: 0fdce16efb281ab52e1aa5a7a760aebcb7a59163

URL: https://github.com/llvm/llvm-project/commit/0fdce16efb281ab52e1aa5a7a760aebcb7a59163
DIFF: https://github.com/llvm/llvm-project/commit/0fdce16efb281ab52e1aa5a7a760aebcb7a59163.diff

LOG: [AMDGPU] Regenerate fp2int tests. NFCI.

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
    llvm/test/CodeGen/AMDGPU/fp_to_uint.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
index 5b47d9cbe1a7..5e7f1078fcd5 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -1,257 +1,1107 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=SI,FUNC,GCN
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=FUNC,GCN
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=EG,FUNC
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s --check-prefixes=SI
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s --check-prefixes=VI
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefixes=EG
 
 declare float @llvm.fabs.f32(float) #1
 
-; FUNC-LABEL: {{^}}fp_to_sint_i32:
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI: v_cvt_i32_f32_e32
-; SI: s_endpgm
 define amdgpu_kernel void @fp_to_sint_i32(i32 addrspace(1)* %out, float %in) {
+; SI-LABEL: fp_to_sint_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_i32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC * T0.W, KC0[2].Z,
+; EG-NEXT:     FLT_TO_INT T0.X, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptosi float %in to i32
   store i32 %conv, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_sint_i32_fabs:
-; SI: v_cvt_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}}
 define amdgpu_kernel void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) {
+; SI-LABEL: fp_to_sint_i32_fabs:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_i32_f32_e64 v0, |s4|
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_i32_fabs:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_i32_f32_e64 v0, |s2|
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_i32_fabs:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC * T0.W, |KC0[2].Z|,
+; EG-NEXT:     FLT_TO_INT T0.X, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %in.fabs = call float @llvm.fabs.f32(float %in)
   %conv = fptosi float %in.fabs to i32
   store i32 %conv, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_sint_v2i32:
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI: v_cvt_i32_f32_e32
-; SI: v_cvt_i32_f32_e32
 define amdgpu_kernel void @fp_to_sint_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
+; SI-LABEL: fp_to_sint_v2i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_i32_f32_e32 v1, s5
+; SI-NEXT:    v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_v2i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_i32_f32_e32 v1, s3
+; VI-NEXT:    v_cvt_i32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_v2i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC * T0.W, KC0[3].X,
+; EG-NEXT:     FLT_TO_INT T0.Y, PV.W,
+; EG-NEXT:     TRUNC * T0.W, KC0[2].W,
+; EG-NEXT:     FLT_TO_INT T0.X, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %result = fptosi <2 x float> %in to <2 x i32>
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_sint_v4i32:
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW]}}
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; SI: v_cvt_i32_f32_e32
-; SI: v_cvt_i32_f32_e32
-; SI: v_cvt_i32_f32_e32
-; SI: v_cvt_i32_f32_e32
 define amdgpu_kernel void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+; SI-LABEL: fp_to_sint_v4i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_i32_f32_e32 v3, s7
+; SI-NEXT:    v_cvt_i32_f32_e32 v2, s6
+; SI-NEXT:    v_cvt_i32_f32_e32 v1, s5
+; SI-NEXT:    v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_v4i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_i32_f32_e32 v3, s7
+; VI-NEXT:    v_cvt_i32_f32_e32 v2, s6
+; VI-NEXT:    v_cvt_i32_f32_e32 v1, s5
+; VI-NEXT:    v_cvt_i32_f32_e32 v0, s4
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_v4i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    TEX 0 @6
+; EG-NEXT:    ALU 9, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    Fetch clause starting at 6:
+; EG-NEXT:     VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT:    ALU clause starting at 8:
+; EG-NEXT:     MOV * T0.X, KC0[2].Z,
+; EG-NEXT:    ALU clause starting at 9:
+; EG-NEXT:     TRUNC T0.W, T0.W,
+; EG-NEXT:     TRUNC * T1.W, T0.Z,
+; EG-NEXT:     FLT_TO_INT * T0.W, PV.W,
+; EG-NEXT:     FLT_TO_INT T0.Z, T1.W,
+; EG-NEXT:     TRUNC * T1.W, T0.Y,
+; EG-NEXT:     FLT_TO_INT T0.Y, PV.W,
+; EG-NEXT:     TRUNC * T1.W, T0.X,
+; EG-NEXT:     FLT_TO_INT T0.X, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %value = load <4 x float>, <4 x float> addrspace(1) * %in
   %result = fptosi <4 x float> %value to <4 x i32>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_sint_i64:
-
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
 ; Check that the compiler doesn't crash with a "cannot select" error
-; SI: s_endpgm
 define amdgpu_kernel void @fp_to_sint_i64 (i64 addrspace(1)* %out, float %in) {
+; SI-LABEL: fp_to_sint_i64:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dword s0, s[0:1], 0xb
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s8, s0, 0x80017
+; SI-NEXT:    s_and_b32 s2, s0, 0x7fffff
+; SI-NEXT:    s_ashr_i32 s9, s0, 31
+; SI-NEXT:    s_add_i32 s3, s8, 0xffffff6a
+; SI-NEXT:    s_or_b32 s0, s2, 0x800000
+; SI-NEXT:    s_sub_i32 s10, 0x96, s8
+; SI-NEXT:    s_ashr_i32 s11, s9, 31
+; SI-NEXT:    s_lshl_b64 s[2:3], s[0:1], s3
+; SI-NEXT:    s_lshr_b64 s[0:1], s[0:1], s10
+; SI-NEXT:    s_addk_i32 s8, 0xff81
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s8, 23
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    v_mov_b32_e32 v3, s2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; SI-NEXT:    v_xor_b32_e32 v1, s11, v1
+; SI-NEXT:    v_xor_b32_e32 v2, s9, v2
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, s9, v2
+; SI-NEXT:    v_subb_u32_e32 v0, vcc, v1, v0, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s8, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[0:1]
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_i64:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s8, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s1, 0
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s9, s8, 0x80017
+; VI-NEXT:    s_and_b32 s0, s8, 0x7fffff
+; VI-NEXT:    s_add_i32 s2, s9, 0xffffff6a
+; VI-NEXT:    s_bitset1_b32 s0, 23
+; VI-NEXT:    s_sub_i32 s10, 0x96, s9
+; VI-NEXT:    s_lshl_b64 s[2:3], s[0:1], s2
+; VI-NEXT:    s_lshr_b64 s[0:1], s[0:1], s10
+; VI-NEXT:    s_addk_i32 s9, 0xff81
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s9, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s0
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_ashr_i32 s0, s8, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    v_xor_b32_e32 v0, s1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_subrev_u32_e32 v3, vcc, s0, v1
+; VI-NEXT:    v_subb_u32_e32 v0, vcc, v0, v2, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s9, 0
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v3, 0, s[0:1]
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_i64:
+; EG:       ; %bb.0: ; %entry
+; EG-NEXT:    ALU 42, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T0.W, KC0[2].Z, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T1.W, KC0[2].Z, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     SUB_INT T2.W, literal.x, PV.W,
+; EG-NEXT:     OR_INT * T1.W, PS, literal.y,
+; EG-NEXT:    181(2.536350e-43), 8388608(1.175494e-38)
+; EG-NEXT:     LSHR * T2.W, PS, PV.W,
+; EG-NEXT:     ADD_INT T0.X, T0.W, literal.x,
+; EG-NEXT:     LSHR T0.Y, PV.W, 1,
+; EG-NEXT:     ADD_INT T0.Z, T0.W, literal.y,
+; EG-NEXT:     SUB_INT T2.W, literal.z, T0.W,
+; EG-NEXT:     ADD_INT * T0.W, T0.W, literal.w,
+; EG-NEXT:    -127(nan), -150(nan)
+; EG-NEXT:    150(2.101948e-43), -182(nan)
+; EG-NEXT:     LSHL T1.X, T1.W, PS,
+; EG-NEXT:     SETGT_UINT T1.Y, PV.W, literal.x,
+; EG-NEXT:     LSHR T1.Z, T1.W, PV.W,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.x,
+; EG-NEXT:     LSHL * T1.W, T1.W, PV.Z,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T2.Y, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     CNDE_INT T0.W, PV.W, T0.Y, PV.X,
+; EG-NEXT:     SETGT_INT * T1.W, T0.X, literal.x,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Z, PS, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T0.W, PS, PV.Z, PV.Y,
+; EG-NEXT:     ASHR * T1.W, KC0[2].Z, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T0.W, PV.W, PS,
+; EG-NEXT:     XOR_INT * T2.W, PV.Z, PS,
+; EG-NEXT:     SUB_INT T2.W, PS, T1.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.W, T1.W,
+; EG-NEXT:     SUB_INT T2.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT * T3.W, T0.X, literal.x,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T0.Y, PS, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T0.W, T0.W, T1.W,
+; EG-NEXT:     CNDE_INT T0.X, T3.W, 0.0, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
 entry:
   %0 = fptosi float %in to i64
   store i64 %0, i64 addrspace(1)* %out
   ret void
 }
 
-; FUNC: {{^}}fp_to_sint_v2i64:
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
-; SI: s_endpgm
 define amdgpu_kernel void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+; SI-LABEL: fp_to_sint_v2i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xb
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_movk_i32 s8, 0xff6a
+; SI-NEXT:    s_mov_b32 s2, 0x7fffff
+; SI-NEXT:    s_mov_b32 s10, 0x800000
+; SI-NEXT:    s_mov_b32 s3, 0
+; SI-NEXT:    s_movk_i32 s9, 0x96
+; SI-NEXT:    s_movk_i32 s11, 0xff81
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s12, s1, 0x80017
+; SI-NEXT:    s_and_b32 s13, s1, s2
+; SI-NEXT:    s_ashr_i32 s14, s1, 31
+; SI-NEXT:    s_bfe_u32 s1, s0, 0x80017
+; SI-NEXT:    s_and_b32 s15, s0, s2
+; SI-NEXT:    s_ashr_i32 s16, s0, 31
+; SI-NEXT:    s_add_i32 s0, s12, s8
+; SI-NEXT:    s_or_b32 s2, s13, s10
+; SI-NEXT:    s_sub_i32 s13, s9, s12
+; SI-NEXT:    s_add_i32 s12, s12, s11
+; SI-NEXT:    s_ashr_i32 s17, s14, 31
+; SI-NEXT:    s_add_i32 s18, s1, s8
+; SI-NEXT:    s_sub_i32 s19, s9, s1
+; SI-NEXT:    s_add_i32 s11, s1, s11
+; SI-NEXT:    s_ashr_i32 s20, s16, 31
+; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], s0
+; SI-NEXT:    s_lshr_b64 s[8:9], s[2:3], s13
+; SI-NEXT:    v_mov_b32_e32 v0, s17
+; SI-NEXT:    s_or_b32 s2, s15, s10
+; SI-NEXT:    v_mov_b32_e32 v1, s20
+; SI-NEXT:    v_mov_b32_e32 v2, s9
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s12, 23
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s8
+; SI-NEXT:    v_mov_b32_e32 v4, s0
+; SI-NEXT:    s_lshl_b64 s[0:1], s[2:3], s18
+; SI-NEXT:    s_lshr_b64 s[2:3], s[2:3], s19
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; SI-NEXT:    v_xor_b32_e32 v2, s17, v2
+; SI-NEXT:    v_mov_b32_e32 v4, s3
+; SI-NEXT:    v_mov_b32_e32 v5, s1
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s11, 23
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    v_mov_b32_e32 v6, s0
+; SI-NEXT:    v_xor_b32_e32 v3, s14, v3
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
+; SI-NEXT:    v_xor_b32_e32 v4, s20, v4
+; SI-NEXT:    v_subrev_i32_e32 v6, vcc, s14, v3
+; SI-NEXT:    v_subb_u32_e32 v0, vcc, v2, v0, vcc
+; SI-NEXT:    v_xor_b32_e32 v5, s16, v5
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s12, 0
+; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v2, v6, 0, s[0:1]
+; SI-NEXT:    v_subrev_i32_e32 v0, vcc, s16, v5
+; SI-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s11, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_v2i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s14, 0x7fffff
+; VI-NEXT:    s_movk_i32 s12, 0xff6a
+; VI-NEXT:    s_mov_b32 s15, 0x800000
+; VI-NEXT:    s_movk_i32 s16, 0x96
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s13, s1, 0x80017
+; VI-NEXT:    s_and_b32 s2, s1, s14
+; VI-NEXT:    s_add_i32 s8, s13, s12
+; VI-NEXT:    s_or_b32 s2, s2, s15
+; VI-NEXT:    s_mov_b32 s3, 0
+; VI-NEXT:    s_sub_i32 s10, s16, s13
+; VI-NEXT:    s_movk_i32 s17, 0xff81
+; VI-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
+; VI-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
+; VI-NEXT:    s_add_i32 s13, s13, s17
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s13, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s10
+; VI-NEXT:    s_ashr_i32 s1, s1, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    s_ashr_i32 s2, s1, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s1, v1
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s1, v1
+; VI-NEXT:    v_xor_b32_e32 v0, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_and_b32 s2, s0, s14
+; VI-NEXT:    s_bfe_u32 s1, s0, 0x80017
+; VI-NEXT:    v_subb_u32_e32 v0, vcc, v0, v2, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[8:9], s13, 0
+; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[8:9]
+; VI-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[8:9]
+; VI-NEXT:    s_add_i32 s8, s1, s12
+; VI-NEXT:    s_or_b32 s2, s2, s15
+; VI-NEXT:    s_sub_i32 s10, s16, s1
+; VI-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
+; VI-NEXT:    s_lshr_b64 s[2:3], s[2:3], s10
+; VI-NEXT:    s_add_i32 s1, s1, s17
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s1, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    s_ashr_i32 s0, s0, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
+; VI-NEXT:    s_ashr_i32 s2, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    v_subrev_u32_e32 v5, vcc, s0, v1
+; VI-NEXT:    v_xor_b32_e32 v0, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_subb_u32_e32 v0, vcc, v0, v4, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    v_cndmask_b32_e64 v0, v5, 0, s[0:1]
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_v2i64:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 79, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T1.W, KC0[2].W, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T2.W, KC0[2].W, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     AND_INT T0.Y, KC0[3].X, literal.x,
+; EG-NEXT:     ADD_INT T0.Z, PV.W, literal.y,
+; EG-NEXT:     OR_INT T2.W, PS, literal.z,
+; EG-NEXT:     SUB_INT * T3.W, literal.w, PV.W,
+; EG-NEXT:    8388607(1.175494e-38), -150(nan)
+; EG-NEXT:    8388608(1.175494e-38), 150(2.101948e-43)
+; EG-NEXT:     BFE_UINT T0.X, KC0[3].X, literal.x, T0.W,
+; EG-NEXT:     SETGT_UINT T1.Y, PS, literal.y,
+; EG-NEXT:     LSHR T1.Z, PV.W, PS,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.y,
+; EG-NEXT:     LSHL * T3.W, PV.W, PV.Z,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T1.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T1.Y, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     ADD_INT T0.Z, PV.X, literal.x,
+; EG-NEXT:     OR_INT T3.W, T0.Y, literal.y,
+; EG-NEXT:     SUB_INT * T4.W, literal.z, PV.X,
+; EG-NEXT:    -150(nan), 8388608(1.175494e-38)
+; EG-NEXT:    150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, literal.x, T0.X,
+; EG-NEXT:     SETGT_UINT T0.Y, PS, literal.y,
+; EG-NEXT:     LSHR T1.Z, PV.W, PS,
+; EG-NEXT:     SETGT_UINT T4.W, PV.Z, literal.y,
+; EG-NEXT:     LSHL * T5.W, PV.W, PV.Z,
+; EG-NEXT:    181(2.536350e-43), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T3.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Y, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     ADD_INT T0.Z, T0.X, literal.x,
+; EG-NEXT:     LSHR T5.W, T3.W, PV.X,
+; EG-NEXT:     SUB_INT * T6.W, literal.y, T1.W,
+; EG-NEXT:    -182(nan), 181(2.536350e-43)
+; EG-NEXT:     ADD_INT T2.X, T1.W, literal.x,
+; EG-NEXT:     LSHR T2.Y, T2.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT:     ADD_INT T1.Z, T0.X, literal.y,
+; EG-NEXT:     LSHR T5.W, PV.W, 1,
+; EG-NEXT:     LSHL * T3.W, T3.W, PV.Z,
+; EG-NEXT:    -182(nan), -127(nan)
+; EG-NEXT:     CNDE_INT T0.X, T4.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT T3.Y, PV.Z, literal.x,
+; EG-NEXT:     ADD_INT T0.Z, T1.W, literal.y, BS:VEC_120/SCL_212
+; EG-NEXT:     LSHR T1.W, PV.Y, 1,
+; EG-NEXT:     LSHL * T2.W, T2.W, PV.X,
+; EG-NEXT:    23(3.222986e-44), -127(nan)
+; EG-NEXT:     CNDE_INT T2.X, T0.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT T2.Y, PV.Z, literal.x,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Y, 0.0, PV.X,
+; EG-NEXT:     CNDE_INT T0.W, PV.Y, T0.Y, T3.X,
+; EG-NEXT:     ASHR * T1.W, KC0[3].X, literal.y,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     XOR_INT T0.X, PV.W, PS,
+; EG-NEXT:     XOR_INT T0.Y, PV.Z, PS,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Y, 0.0, PV.X,
+; EG-NEXT:     CNDE_INT T0.W, PV.Y, T1.Y, T1.X,
+; EG-NEXT:     ASHR * T2.W, KC0[2].W, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T1.Y, PV.W, PS,
+; EG-NEXT:     XOR_INT T2.Z, PV.Z, PS,
+; EG-NEXT:     SUB_INT T0.W, PV.Y, T1.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.X, T1.W,
+; EG-NEXT:     SUB_INT T0.Y, PV.W, PS,
+; EG-NEXT:     SETGT_INT T1.Z, T1.Z, literal.x,
+; EG-NEXT:     SUB_INT T0.W, PV.Z, T2.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.Y, T2.W,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.Z, PV.W, PS,
+; EG-NEXT:     SETGT_INT T0.W, T0.Z, literal.x,
+; EG-NEXT:     CNDE_INT * T3.W, PV.Z, 0.0, PV.Y, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T3.Y, PV.W, 0.0, PV.Z,
+; EG-NEXT:     SUB_INT * T1.W, T0.X, T1.W,
+; EG-NEXT:     CNDE_INT T3.Z, T1.Z, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T1.W, T1.Y, T2.W,
+; EG-NEXT:     CNDE_INT T3.X, T0.W, 0.0, PV.W,
+; EG-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptosi <2 x float> %x to <2 x i64>
   store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
   ret void
 }
 
-; FUNC: {{^}}fp_to_sint_v4i64:
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
-; SI: s_endpgm
 define amdgpu_kernel void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+; SI-LABEL: fp_to_sint_v4i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_movk_i32 s10, 0xff6a
+; SI-NEXT:    s_mov_b32 s8, 0x7fffff
+; SI-NEXT:    s_mov_b32 s11, 0x800000
+; SI-NEXT:    s_mov_b32 s9, 0
+; SI-NEXT:    s_movk_i32 s12, 0x96
+; SI-NEXT:    s_movk_i32 s13, 0xff81
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s14, s1, 0x80017
+; SI-NEXT:    s_and_b32 s15, s1, s8
+; SI-NEXT:    s_ashr_i32 s16, s1, 31
+; SI-NEXT:    s_bfe_u32 s1, s0, 0x80017
+; SI-NEXT:    s_and_b32 s17, s0, s8
+; SI-NEXT:    s_ashr_i32 s18, s0, 31
+; SI-NEXT:    s_bfe_u32 s0, s3, 0x80017
+; SI-NEXT:    s_and_b32 s19, s3, s8
+; SI-NEXT:    s_ashr_i32 s20, s3, 31
+; SI-NEXT:    s_bfe_u32 s3, s2, 0x80017
+; SI-NEXT:    s_and_b32 s21, s2, s8
+; SI-NEXT:    s_ashr_i32 s22, s2, 31
+; SI-NEXT:    s_add_i32 s2, s14, s10
+; SI-NEXT:    s_or_b32 s8, s15, s11
+; SI-NEXT:    s_sub_i32 s15, s12, s14
+; SI-NEXT:    s_add_i32 s14, s14, s13
+; SI-NEXT:    s_ashr_i32 s23, s16, 31
+; SI-NEXT:    s_add_i32 s24, s1, s10
+; SI-NEXT:    s_sub_i32 s25, s12, s1
+; SI-NEXT:    s_add_i32 s26, s1, s13
+; SI-NEXT:    s_ashr_i32 s27, s18, 31
+; SI-NEXT:    s_add_i32 s28, s0, s10
+; SI-NEXT:    s_sub_i32 s29, s12, s0
+; SI-NEXT:    s_add_i32 s30, s0, s13
+; SI-NEXT:    s_ashr_i32 s31, s20, 31
+; SI-NEXT:    s_add_i32 s10, s3, s10
+; SI-NEXT:    s_sub_i32 s12, s12, s3
+; SI-NEXT:    s_add_i32 s13, s3, s13
+; SI-NEXT:    s_ashr_i32 s33, s22, 31
+; SI-NEXT:    s_lshl_b64 s[0:1], s[8:9], s2
+; SI-NEXT:    s_lshr_b64 s[2:3], s[8:9], s15
+; SI-NEXT:    v_mov_b32_e32 v0, s23
+; SI-NEXT:    s_or_b32 s8, s17, s11
+; SI-NEXT:    v_mov_b32_e32 v1, s27
+; SI-NEXT:    v_mov_b32_e32 v4, s31
+; SI-NEXT:    v_mov_b32_e32 v5, s33
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s14, 23
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s2
+; SI-NEXT:    v_mov_b32_e32 v6, s0
+; SI-NEXT:    s_lshl_b64 s[0:1], s[8:9], s24
+; SI-NEXT:    s_lshr_b64 s[2:3], s[8:9], s25
+; SI-NEXT:    s_or_b32 s8, s19, s11
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
+; SI-NEXT:    v_xor_b32_e32 v2, s23, v2
+; SI-NEXT:    v_mov_b32_e32 v6, s3
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s26, 23
+; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; SI-NEXT:    v_mov_b32_e32 v7, s2
+; SI-NEXT:    v_mov_b32_e32 v8, s0
+; SI-NEXT:    s_lshl_b64 s[0:1], s[8:9], s28
+; SI-NEXT:    s_lshr_b64 s[2:3], s[8:9], s29
+; SI-NEXT:    s_or_b32 s8, s21, s11
+; SI-NEXT:    v_xor_b32_e32 v3, s16, v3
+; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
+; SI-NEXT:    v_xor_b32_e32 v6, s27, v6
+; SI-NEXT:    v_mov_b32_e32 v8, s3
+; SI-NEXT:    v_mov_b32_e32 v9, s1
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s30, 23
+; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; SI-NEXT:    v_mov_b32_e32 v9, s2
+; SI-NEXT:    v_mov_b32_e32 v10, s0
+; SI-NEXT:    s_lshl_b64 s[2:3], s[8:9], s10
+; SI-NEXT:    s_lshr_b64 s[8:9], s[8:9], s12
+; SI-NEXT:    v_subrev_i32_e64 v11, s[0:1], s16, v3
+; SI-NEXT:    v_subb_u32_e64 v0, s[0:1], v2, v0, s[0:1]
+; SI-NEXT:    v_xor_b32_e32 v7, s18, v7
+; SI-NEXT:    v_cndmask_b32_e32 v9, v9, v10, vcc
+; SI-NEXT:    v_xor_b32_e32 v8, s31, v8
+; SI-NEXT:    v_mov_b32_e32 v2, s9
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s13, 23
+; SI-NEXT:    v_cndmask_b32_e32 v10, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v12, s8
+; SI-NEXT:    v_mov_b32_e32 v13, s2
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s14, 0
+; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v2, v11, 0, s[0:1]
+; SI-NEXT:    v_subrev_i32_e64 v0, s[0:1], s18, v7
+; SI-NEXT:    v_subb_u32_e64 v1, s[0:1], v6, v1, s[0:1]
+; SI-NEXT:    v_xor_b32_e32 v6, s20, v9
+; SI-NEXT:    v_cndmask_b32_e32 v7, v12, v13, vcc
+; SI-NEXT:    v_xor_b32_e32 v9, s33, v10
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s26, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; SI-NEXT:    v_subrev_i32_e32 v6, vcc, s20, v6
+; SI-NEXT:    v_subb_u32_e32 v4, vcc, v8, v4, vcc
+; SI-NEXT:    v_xor_b32_e32 v8, s22, v7
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s30, 0
+; SI-NEXT:    v_cndmask_b32_e64 v7, v4, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v6, v6, 0, s[0:1]
+; SI-NEXT:    v_subrev_i32_e32 v4, vcc, s22, v8
+; SI-NEXT:    v_subb_u32_e32 v5, vcc, v9, v5, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s13, 0
+; SI-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[0:1]
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_v4i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; VI-NEXT:    s_mov_b32 s16, 0x7fffff
+; VI-NEXT:    s_movk_i32 s14, 0xff6a
+; VI-NEXT:    s_mov_b32 s17, 0x800000
+; VI-NEXT:    s_movk_i32 s18, 0x96
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s15, s1, 0x80017
+; VI-NEXT:    s_and_b32 s8, s1, s16
+; VI-NEXT:    s_add_i32 s10, s15, s14
+; VI-NEXT:    s_or_b32 s8, s8, s17
+; VI-NEXT:    s_mov_b32 s9, 0
+; VI-NEXT:    s_sub_i32 s12, s18, s15
+; VI-NEXT:    s_movk_i32 s19, 0xff81
+; VI-NEXT:    s_lshl_b64 s[10:11], s[8:9], s10
+; VI-NEXT:    s_lshr_b64 s[12:13], s[8:9], s12
+; VI-NEXT:    s_add_i32 s15, s15, s19
+; VI-NEXT:    v_mov_b32_e32 v0, s13
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s15, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v1, s12
+; VI-NEXT:    s_ashr_i32 s1, s1, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    s_ashr_i32 s8, s1, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s1, v1
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s1, v1
+; VI-NEXT:    v_xor_b32_e32 v0, s8, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s8
+; VI-NEXT:    s_and_b32 s8, s0, s16
+; VI-NEXT:    s_bfe_u32 s1, s0, 0x80017
+; VI-NEXT:    v_subb_u32_e32 v0, vcc, v0, v2, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[10:11], s15, 0
+; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[10:11]
+; VI-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[10:11]
+; VI-NEXT:    s_add_i32 s10, s1, s14
+; VI-NEXT:    s_or_b32 s8, s8, s17
+; VI-NEXT:    s_sub_i32 s12, s18, s1
+; VI-NEXT:    s_lshl_b64 s[10:11], s[8:9], s10
+; VI-NEXT:    s_lshr_b64 s[12:13], s[8:9], s12
+; VI-NEXT:    s_add_i32 s1, s1, s19
+; VI-NEXT:    v_mov_b32_e32 v0, s13
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s1, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s12
+; VI-NEXT:    v_mov_b32_e32 v4, s10
+; VI-NEXT:    s_ashr_i32 s0, s0, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
+; VI-NEXT:    s_ashr_i32 s8, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    v_subrev_u32_e32 v5, vcc, s0, v1
+; VI-NEXT:    v_xor_b32_e32 v0, s8, v0
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_subb_u32_e32 v0, vcc, v0, v4, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v5, 0, s[0:1]
+; VI-NEXT:    s_bfe_u32 s12, s3, 0x80017
+; VI-NEXT:    s_and_b32 s1, s3, s16
+; VI-NEXT:    s_add_i32 s0, s12, s14
+; VI-NEXT:    s_or_b32 s8, s1, s17
+; VI-NEXT:    s_sub_i32 s10, s18, s12
+; VI-NEXT:    s_lshl_b64 s[0:1], s[8:9], s0
+; VI-NEXT:    s_lshr_b64 s[10:11], s[8:9], s10
+; VI-NEXT:    s_add_i32 s12, s12, s19
+; VI-NEXT:    v_mov_b32_e32 v4, s11
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s12, 23
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s10
+; VI-NEXT:    s_ashr_i32 s0, s3, 31
+; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v5, s0, v5
+; VI-NEXT:    v_xor_b32_e32 v4, s1, v4
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    v_subrev_u32_e32 v5, vcc, s0, v5
+; VI-NEXT:    v_subb_u32_e32 v4, vcc, v4, v6, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s12, 0
+; VI-NEXT:    s_bfe_u32 s3, s2, 0x80017
+; VI-NEXT:    v_cndmask_b32_e64 v7, v4, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v6, v5, 0, s[0:1]
+; VI-NEXT:    s_and_b32 s1, s2, s16
+; VI-NEXT:    s_add_i32 s0, s3, s14
+; VI-NEXT:    s_or_b32 s8, s1, s17
+; VI-NEXT:    s_sub_i32 s10, s18, s3
+; VI-NEXT:    s_lshl_b64 s[0:1], s[8:9], s0
+; VI-NEXT:    s_lshr_b64 s[8:9], s[8:9], s10
+; VI-NEXT:    s_add_i32 s3, s3, s19
+; VI-NEXT:    v_mov_b32_e32 v4, s9
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s3, 23
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v8, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s8
+; VI-NEXT:    s_ashr_i32 s0, s2, 31
+; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v5, s0, v5
+; VI-NEXT:    v_xor_b32_e32 v4, s1, v4
+; VI-NEXT:    v_mov_b32_e32 v8, s1
+; VI-NEXT:    v_subrev_u32_e32 v9, vcc, s0, v5
+; VI-NEXT:    v_subb_u32_e32 v4, vcc, v4, v8, vcc
+; VI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s3, 0
+; VI-NEXT:    v_cndmask_b32_e64 v5, v4, 0, s[0:1]
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    v_cndmask_b32_e64 v4, v9, 0, s[0:1]
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_v4i64:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 99, @6, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    ALU 64, @106, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 0
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T5.XYZW, T2.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 6:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T1.W, KC0[3].Z, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T2.W, KC0[3].Z, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     ADD_INT T0.Z, PV.W, literal.x,
+; EG-NEXT:     SUB_INT T3.W, literal.y, PV.W,
+; EG-NEXT:     OR_INT * T2.W, PS, literal.z,
+; EG-NEXT:    -127(nan), 181(2.536350e-43)
+; EG-NEXT:    8388608(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T0.X, KC0[4].X, literal.x, T0.W,
+; EG-NEXT:     AND_INT T0.Y, KC0[4].X, literal.y,
+; EG-NEXT:     ADD_INT T1.Z, T1.W, literal.z,
+; EG-NEXT:     ADD_INT T4.W, T1.W, literal.w,
+; EG-NEXT:     LSHR * T3.W, PS, PV.W,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:    -150(nan), -182(nan)
+; EG-NEXT:     LSHR T1.X, PS, 1,
+; EG-NEXT:     LSHL T1.Y, T2.W, PV.W,
+; EG-NEXT:     SETGT_UINT T2.Z, PV.Z, literal.x,
+; EG-NEXT:     OR_INT T3.W, PV.Y, literal.y,
+; EG-NEXT:     ADD_INT * T4.W, PV.X, literal.z,
+; EG-NEXT:    31(4.344025e-44), 8388608(1.175494e-38)
+; EG-NEXT:    -150(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, literal.x, T1.W,
+; EG-NEXT:     SETGT_UINT T0.Y, PS, literal.y,
+; EG-NEXT:     LSHL T3.Z, PV.W, PS,
+; EG-NEXT:     CNDE_INT T1.W, PV.Z, PV.X, PV.Y,
+; EG-NEXT:     SETGT_INT * T4.W, T0.Z, literal.z,
+; EG-NEXT:    150(2.101948e-43), 31(4.344025e-44)
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Y, PS, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T3.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     SETGT_UINT T1.W, PV.X, literal.x,
+; EG-NEXT:     SUB_INT * T5.W, literal.y, T0.X,
+; EG-NEXT:    31(4.344025e-44), 181(2.536350e-43)
+; EG-NEXT:     LSHR T1.X, T2.W, T2.X,
+; EG-NEXT:     LSHL T2.Y, T2.W, T1.Z,
+; EG-NEXT:     SUB_INT T1.Z, literal.x, T0.X, BS:VEC_021/SCL_122
+; EG-NEXT:     ADD_INT T2.W, T0.X, literal.y,
+; EG-NEXT:     LSHR * T5.W, T3.W, PS,
+; EG-NEXT:    150(2.101948e-43), -182(nan)
+; EG-NEXT:     ADD_INT T0.X, T0.X, literal.x,
+; EG-NEXT:     LSHR T3.Y, PS, 1,
+; EG-NEXT:     LSHL T4.Z, T3.W, PV.W,
+; EG-NEXT:     SETGT_UINT T2.W, PV.Z, literal.y,
+; EG-NEXT:     LSHR * T3.W, T3.W, PV.Z,
+; EG-NEXT:    -127(nan), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T2.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Y, T0.Y, PV.Y, PV.Z,
+; EG-NEXT:     SETGT_INT T1.Z, PV.X, literal.x,
+; EG-NEXT:     CNDE_INT T2.W, T2.Z, T2.Y, 0.0,
+; EG-NEXT:     CNDE_INT * T1.W, T1.W, T1.X, 0.0,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.X, T4.W, PS, PV.W,
+; EG-NEXT:     ASHR T2.Y, KC0[3].Z, literal.x,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT:     CNDE_INT T1.W, PV.Z, PV.X, T3.Z,
+; EG-NEXT:     ASHR * T2.W, KC0[4].X, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T2.X, KC0[3].Y, literal.x, T0.W,
+; EG-NEXT:     XOR_INT T0.Y, PV.W, PS,
+; EG-NEXT:     XOR_INT T1.Z, PV.Z, PS,
+; EG-NEXT:     XOR_INT T1.W, PV.X, PV.Y,
+; EG-NEXT:     XOR_INT * T3.W, T1.Y, PV.Y,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     AND_INT T1.X, KC0[3].Y, literal.x,
+; EG-NEXT:     SUB_INT T1.Y, PS, T2.Y,
+; EG-NEXT:     SUBB_UINT T2.Z, PV.W, T2.Y,
+; EG-NEXT:     SUB_INT T3.W, PV.Z, T2.W,
+; EG-NEXT:     SUBB_UINT * T4.W, PV.Y, T2.W,
+; EG-NEXT:    8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T3.Y, PV.W, PS,
+; EG-NEXT:     SUB_INT T1.Z, PV.Y, PV.Z,
+; EG-NEXT:     OR_INT T3.W, PV.X, literal.x,
+; EG-NEXT:     SUB_INT * T4.W, literal.y, T2.X,
+; EG-NEXT:    8388608(1.175494e-38), 150(2.101948e-43)
+; EG-NEXT:     SETGT_INT T1.X, T0.Z, literal.x,
+; EG-NEXT:     SETGT_UINT T1.Y, PS, literal.y,
+; EG-NEXT:     LSHR T0.Z, PV.W, PS,
+; EG-NEXT:     SUB_INT T4.W, literal.z, T2.X,
+; EG-NEXT:     AND_INT * T5.W, KC0[3].W, literal.w,
+; EG-NEXT:    -1(nan), 31(4.344025e-44)
+; EG-NEXT:    181(2.536350e-43), 8388607(1.175494e-38)
+; EG-NEXT:     OR_INT T3.X, PS, literal.x,
+; EG-NEXT:     ADD_INT T4.Y, T2.X, literal.y,
+; EG-NEXT:     ADD_INT T2.Z, T2.X, literal.z,
+; EG-NEXT:     BFE_UINT T0.W, KC0[3].W, literal.w, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT:     LSHR * T4.W, T3.W, PV.W,
+; EG-NEXT:    8388608(1.175494e-38), -150(nan)
+; EG-NEXT:    -182(nan), 23(3.222986e-44)
+; EG-NEXT:     ADD_INT T4.X, PV.W, literal.x,
+; EG-NEXT:     ADD_INT T5.Y, T2.X, literal.y,
+; EG-NEXT:     LSHR T3.Z, PS, 1,
+; EG-NEXT:     LSHL T4.W, T3.W, PV.Z,
+; EG-NEXT:     SETGT_UINT * T5.W, PV.Y, literal.z,
+; EG-NEXT:    -150(nan), -127(nan)
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     LSHL T2.X, T3.W, T4.Y,
+; EG-NEXT:     CNDE_INT * T4.Y, PS, PV.Z, PV.W,
+; EG-NEXT:    ALU clause starting at 106:
+; EG-NEXT:     SETGT_INT T2.Z, T5.Y, literal.x,
+; EG-NEXT:     SETGT_UINT T3.W, T4.X, literal.y,
+; EG-NEXT:     LSHL * T4.W, T3.X, T4.X,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T4.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T4.Y, PV.Z, 0.0, T4.Y, BS:VEC_021/SCL_122
+; EG-NEXT:     SUB_INT T3.Z, literal.x, T0.W,
+; EG-NEXT:     CNDE_INT T4.W, T5.W, T2.X, 0.0,
+; EG-NEXT:     CNDE_INT * T5.W, T1.Y, T0.Z, 0.0,
+; EG-NEXT:    181(2.536350e-43), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T2.X, T2.Z, PS, PV.W,
+; EG-NEXT:     ASHR T1.Y, KC0[3].Y, literal.x,
+; EG-NEXT:     SUB_INT T0.Z, literal.y, T0.W,
+; EG-NEXT:     ADD_INT T4.W, T0.W, literal.z,
+; EG-NEXT:     LSHR * T5.W, T3.X, PV.Z,
+; EG-NEXT:    31(4.344025e-44), 150(2.101948e-43)
+; EG-NEXT:    -182(nan), 0(0.000000e+00)
+; EG-NEXT:     ADD_INT T5.X, T0.W, literal.x,
+; EG-NEXT:     LSHR T6.Y, PS, 1,
+; EG-NEXT:     LSHL T2.Z, T3.X, PV.W,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.y,
+; EG-NEXT:     LSHR * T4.W, T3.X, PV.Z,
+; EG-NEXT:    -127(nan), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T3.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T6.Y, T3.W, PV.Y, PV.Z,
+; EG-NEXT:     SETGT_INT T0.Z, PV.X, literal.x,
+; EG-NEXT:     XOR_INT T0.W, T2.X, T1.Y,
+; EG-NEXT:     XOR_INT * T3.W, T4.Y, T1.Y,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, PS, T1.Y,
+; EG-NEXT:     SUBB_UINT T4.Y, PV.W, T1.Y,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT:     CNDE_INT T3.W, PV.Z, PV.X, T4.X,
+; EG-NEXT:     ASHR * T4.W, KC0[3].W, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T3.X, PV.W, PS,
+; EG-NEXT:     XOR_INT T6.Y, PV.Z, PS,
+; EG-NEXT:     SUB_INT T0.Z, PV.X, PV.Y,
+; EG-NEXT:     SETGT_INT T3.W, T5.Y, literal.x,
+; EG-NEXT:     CNDE_INT * T5.W, T1.X, 0.0, T1.Z, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SETGT_INT T0.X, T0.X, literal.x,
+; EG-NEXT:     CNDE_INT T5.Y, PV.W, 0.0, PV.Z,
+; EG-NEXT:     SUB_INT T0.Z, T1.W, T2.Y,
+; EG-NEXT:     SUB_INT T1.W, PV.Y, T4.W,
+; EG-NEXT:     SUBB_UINT * T6.W, PV.X, T4.W,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, PV.W, PS,
+; EG-NEXT:     SETGT_INT T2.Y, T5.X, literal.x,
+; EG-NEXT:     CNDE_INT T5.Z, T1.X, 0.0, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT:     SUB_INT T0.W, T0.W, T1.Y,
+; EG-NEXT:     CNDE_INT * T1.W, PV.X, 0.0, T3.Y, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T5.X, T3.W, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T1.Y, PV.Y, 0.0, PV.X,
+; EG-NEXT:     SUB_INT T0.W, T0.Y, T2.W,
+; EG-NEXT:     LSHR * T2.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Z, T0.X, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T0.W, T3.X, T4.W, BS:VEC_120/SCL_212
+; EG-NEXT:     CNDE_INT T1.X, T2.Y, 0.0, PV.W,
+; EG-NEXT:     ADD_INT * T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:    16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT:     LSHR * T0.X, PV.W, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptosi <4 x float> %x to <4 x i64>
   store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i1:
-; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
-
-; EG: AND_INT
-; EG: SETE_DX10 {{[*]?}} T{{[0-9]+}}.{{[XYZW]}}, KC0[2].Z, literal.y,
-; EG-NEXT: -1082130432(-1.000000e+00)
 define amdgpu_kernel void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_uint_f32_to_i1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cmp_eq_f32_e64 s[4:5], -1.0, s4
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_f32_to_i1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_f32_e64 s[0:1], -1.0, s0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_f32_to_i1:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:     SETE_DX10 * T1.W, KC0[2].Z, literal.y,
+; EG-NEXT:    3(4.203895e-45), -1082130432(-1.000000e+00)
+; EG-NEXT:     AND_INT T1.W, PS, 1,
+; EG-NEXT:     LSHL * T0.W, PV.W, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T0.X, PV.W, PS,
+; EG-NEXT:     LSHL * T0.W, literal.x, PS,
+; EG-NEXT:    255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptosi float %in to i1
   store i1 %conv, i1 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_fabs_f32_to_i1:
-; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, |s{{[0-9]+}}|
 define amdgpu_kernel void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_uint_fabs_f32_to_i1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cmp_eq_f32_e64 s[4:5], -1.0, |s4|
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_fabs_f32_to_i1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_f32_e64 s[0:1], -1.0, |s0|
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_fabs_f32_to_i1:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:     SETE_DX10 * T1.W, |KC0[2].Z|, literal.y,
+; EG-NEXT:    3(4.203895e-45), -1082130432(-1.000000e+00)
+; EG-NEXT:     AND_INT T1.W, PS, 1,
+; EG-NEXT:     LSHL * T0.W, PV.W, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T0.X, PV.W, PS,
+; EG-NEXT:     LSHL * T0.W, literal.x, PS,
+; EG-NEXT:    255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %in.fabs = call float @llvm.fabs.f32(float %in)
   %conv = fptosi float %in.fabs to i1
   store i1 %conv, i1 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_sint_f32_i16:
-; GCN: v_cvt_i32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
-; GCN: buffer_store_short [[VAL]]
 define amdgpu_kernel void @fp_to_sint_f32_i16(i16 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_sint_f32_i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_sint_f32_i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_i32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_sint_f32_i16:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 13, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC T0.W, KC0[2].Z,
+; EG-NEXT:     AND_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     FLT_TO_INT * T0.W, PV.W,
+; EG-NEXT:     AND_INT T0.W, PV.W, literal.x,
+; EG-NEXT:     LSHL * T1.W, T1.W, literal.y,
+; EG-NEXT:    65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT:     LSHL T0.X, PV.W, PS,
+; EG-NEXT:     LSHL * T0.W, literal.x, PS,
+; EG-NEXT:    65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %sint = fptosi float %in to i16
   store i16 %sint, i16 addrspace(1)* %out
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
index dce798f41841..558af38cc691 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -1,248 +1,1441 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap %s -check-prefixes=GCN,FUNC
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap %s -check-prefixes=GCN,FUNC
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap %s -check-prefix=EG -check-prefix=FUNC
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s -check-prefixes=SI
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s -check-prefixes=VI
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck  %s -check-prefixes=EG
 
 declare float @llvm.fabs.f32(float) #1
 
-; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i32:
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-
-; GCN: v_cvt_u32_f32_e32
-; GCN: s_endpgm
 define amdgpu_kernel void @fp_to_uint_f32_to_i32 (i32 addrspace(1)* %out, float %in) {
+; SI-LABEL: fp_to_uint_f32_to_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_f32_to_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_u32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_f32_to_i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 3, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC * T0.W, KC0[2].Z,
+; EG-NEXT:     LSHR T0.X, KC0[2].Y, literal.x,
+; EG-NEXT:     FLT_TO_UINT * T1.X, PV.W,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptoui float %in to i32
   store i32 %conv, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_v2f32_to_v2i32:
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-; GCN: v_cvt_u32_f32_e32
-; GCN: v_cvt_u32_f32_e32
 define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x float> %in) {
+; SI-LABEL: fp_to_uint_v2f32_to_v2i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_u32_f32_e32 v1, s5
+; SI-NEXT:    v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_v2f32_to_v2i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_u32_f32_e32 v1, s3
+; VI-NEXT:    v_cvt_u32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_v2f32_to_v2i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 5, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC T0.W, KC0[3].X,
+; EG-NEXT:     TRUNC * T1.W, KC0[2].W,
+; EG-NEXT:     FLT_TO_UINT * T0.Y, PV.W,
+; EG-NEXT:     LSHR T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:     FLT_TO_UINT * T0.X, T1.W,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %result = fptoui <2 x float> %in to <2 x i32>
   store <2 x i32> %result, <2 x i32> addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_v4f32_to_v4i32:
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; EG: FLT_TO_UINT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW]}}
-; GCN: v_cvt_u32_f32_e32
-; GCN: v_cvt_u32_f32_e32
-; GCN: v_cvt_u32_f32_e32
-; GCN: v_cvt_u32_f32_e32
-
 define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+; SI-LABEL: fp_to_uint_v4f32_to_v4i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_u32_f32_e32 v3, s7
+; SI-NEXT:    v_cvt_u32_f32_e32 v2, s6
+; SI-NEXT:    v_cvt_u32_f32_e32 v1, s5
+; SI-NEXT:    v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_v4f32_to_v4i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_u32_f32_e32 v3, s7
+; VI-NEXT:    v_cvt_u32_f32_e32 v2, s6
+; VI-NEXT:    v_cvt_u32_f32_e32 v1, s5
+; VI-NEXT:    v_cvt_u32_f32_e32 v0, s4
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_v4f32_to_v4i32:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    TEX 0 @6
+; EG-NEXT:    ALU 9, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    Fetch clause starting at 6:
+; EG-NEXT:     VTX_READ_128 T0.XYZW, T0.X, 0, #1
+; EG-NEXT:    ALU clause starting at 8:
+; EG-NEXT:     MOV * T0.X, KC0[2].Z,
+; EG-NEXT:    ALU clause starting at 9:
+; EG-NEXT:     TRUNC T0.W, T0.W,
+; EG-NEXT:     TRUNC * T1.W, T0.Z,
+; EG-NEXT:     FLT_TO_UINT * T0.W, PV.W,
+; EG-NEXT:     TRUNC T2.W, T0.Y,
+; EG-NEXT:     FLT_TO_UINT * T0.Z, T1.W,
+; EG-NEXT:     TRUNC T1.W, T0.X,
+; EG-NEXT:     FLT_TO_UINT * T0.Y, PV.W,
+; EG-NEXT:     LSHR T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:     FLT_TO_UINT * T0.X, PV.W,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %value = load <4 x float>, <4 x float> addrspace(1) * %in
   %result = fptoui <4 x float> %value to <4 x i32>
   store <4 x i32> %result, <4 x i32> addrspace(1)* %out
   ret void
 }
 
-; FUNC: {{^}}fp_to_uint_f32_to_i64:
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
-; GCN: s_endpgm
 define amdgpu_kernel void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %x) {
+; SI-LABEL: fp_to_uint_f32_to_i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dword s8, s[0:1], 0xb
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_movk_i32 s9, 0xff6a
+; SI-NEXT:    s_mov_b32 s2, 0x7fffff
+; SI-NEXT:    s_mov_b32 s10, 0x800000
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_movk_i32 s11, 0x96
+; SI-NEXT:    s_movk_i32 s12, 0xff81
+; SI-NEXT:    v_mov_b32_e32 v4, 0x5f000000
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s3, s8, 0x80017
+; SI-NEXT:    s_and_b32 s0, s8, s2
+; SI-NEXT:    s_ashr_i32 s13, s8, 31
+; SI-NEXT:    v_sub_f32_e32 v0, s8, v4
+; SI-NEXT:    s_add_i32 s14, s3, s9
+; SI-NEXT:    s_or_b32 s0, s0, s10
+; SI-NEXT:    s_sub_i32 s15, s11, s3
+; SI-NEXT:    s_add_i32 s16, s3, s12
+; SI-NEXT:    s_ashr_i32 s17, s13, 31
+; SI-NEXT:    v_bfe_u32 v2, v0, 23, 8
+; SI-NEXT:    v_and_b32_e32 v3, s2, v0
+; SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v0
+; SI-NEXT:    s_lshl_b64 s[2:3], s[0:1], s14
+; SI-NEXT:    s_lshr_b64 s[0:1], s[0:1], s15
+; SI-NEXT:    v_mov_b32_e32 v6, s17
+; SI-NEXT:    v_add_i32_e32 v7, vcc, s9, v2
+; SI-NEXT:    v_or_b32_e32 v0, s10, v3
+; SI-NEXT:    v_sub_i32_e32 v8, vcc, s11, v2
+; SI-NEXT:    v_add_i32_e32 v9, vcc, s12, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v10, 31, v5
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s16, 23
+; SI-NEXT:    v_cndmask_b32_e32 v11, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v12, s0
+; SI-NEXT:    v_mov_b32_e32 v13, s2
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v7
+; SI-NEXT:    v_lshr_b64 v[0:1], v[0:1], v8
+; SI-NEXT:    v_cndmask_b32_e32 v7, v12, v13, vcc
+; SI-NEXT:    v_xor_b32_e32 v8, s17, v11
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v9
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    v_xor_b32_e32 v2, s13, v7
+; SI-NEXT:    v_xor_b32_e32 v0, v0, v5
+; SI-NEXT:    v_xor_b32_e32 v1, v1, v10
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, s13, v2
+; SI-NEXT:    v_subb_u32_e32 v3, vcc, v8, v6, vcc
+; SI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
+; SI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s16, 0
+; SI-NEXT:    v_cndmask_b32_e64 v2, v2, 0, s[0:1]
+; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v9
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; SI-NEXT:    v_cmp_lt_f32_e32 vcc, s8, v4
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_f32_to_i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s9, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s11, 0x7fffff
+; VI-NEXT:    s_movk_i32 s8, 0xff6a
+; VI-NEXT:    s_mov_b32 s12, 0x800000
+; VI-NEXT:    s_movk_i32 s13, 0x96
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s10, s9, 0x80017
+; VI-NEXT:    s_and_b32 s0, s9, s11
+; VI-NEXT:    s_add_i32 s2, s10, s8
+; VI-NEXT:    s_or_b32 s0, s0, s12
+; VI-NEXT:    s_mov_b32 s1, 0
+; VI-NEXT:    s_sub_i32 s14, s13, s10
+; VI-NEXT:    s_lshl_b64 s[2:3], s[0:1], s2
+; VI-NEXT:    s_lshr_b64 s[0:1], s[0:1], s14
+; VI-NEXT:    s_movk_i32 s14, 0xff81
+; VI-NEXT:    s_add_i32 s10, s10, s14
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s10, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s0
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_ashr_i32 s0, s9, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    v_mov_b32_e32 v6, 0x5f000000
+; VI-NEXT:    v_sub_f32_e32 v7, s9, v6
+; VI-NEXT:    v_xor_b32_e32 v0, s1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s0, v1
+; VI-NEXT:    v_subb_u32_e32 v4, vcc, v0, v2, vcc
+; VI-NEXT:    v_bfe_u32 v8, v7, 23, 8
+; VI-NEXT:    v_and_b32_e32 v0, s11, v7
+; VI-NEXT:    v_cmp_lt_i32_e64 s[2:3], s10, 0
+; VI-NEXT:    v_cndmask_b32_e64 v5, v1, 0, s[2:3]
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s8, v8
+; VI-NEXT:    v_sub_u32_e32 v9, vcc, s13, v8
+; VI-NEXT:    v_or_b32_e32 v0, s12, v0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    v_add_u32_e32 v8, vcc, s14, v8
+; VI-NEXT:    v_lshlrev_b64 v[2:3], v2, v[0:1]
+; VI-NEXT:    v_lshrrev_b64 v[0:1], v9, v[0:1]
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v8
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v2, 31, v7
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, v0, v2
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_xor_b32_e32 v1, v1, v3
+; VI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v8
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_cmp_lt_f32_e64 s[0:1], s9, v6
+; VI-NEXT:    v_cndmask_b32_e64 v2, v4, 0, s[2:3]
+; VI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, v5, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_f32_to_i64:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 42, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T0.W, KC0[2].Z, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T1.W, KC0[2].Z, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     SUB_INT T2.W, literal.x, PV.W,
+; EG-NEXT:     OR_INT * T1.W, PS, literal.y,
+; EG-NEXT:    181(2.536350e-43), 8388608(1.175494e-38)
+; EG-NEXT:     LSHR * T2.W, PS, PV.W,
+; EG-NEXT:     ADD_INT T0.X, T0.W, literal.x,
+; EG-NEXT:     LSHR T0.Y, PV.W, 1,
+; EG-NEXT:     ADD_INT T0.Z, T0.W, literal.y,
+; EG-NEXT:     SUB_INT T2.W, literal.z, T0.W,
+; EG-NEXT:     ADD_INT * T0.W, T0.W, literal.w,
+; EG-NEXT:    -127(nan), -150(nan)
+; EG-NEXT:    150(2.101948e-43), -182(nan)
+; EG-NEXT:     LSHL T1.X, T1.W, PS,
+; EG-NEXT:     SETGT_UINT T1.Y, PV.W, literal.x,
+; EG-NEXT:     LSHR T1.Z, T1.W, PV.W,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.x,
+; EG-NEXT:     LSHL * T1.W, T1.W, PV.Z,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T2.Y, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     CNDE_INT T0.W, PV.W, T0.Y, PV.X,
+; EG-NEXT:     SETGT_INT * T1.W, T0.X, literal.x,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Z, PS, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T0.W, PS, PV.Z, PV.Y,
+; EG-NEXT:     ASHR * T1.W, KC0[2].Z, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T0.W, PV.W, PS,
+; EG-NEXT:     XOR_INT * T2.W, PV.Z, PS,
+; EG-NEXT:     SUB_INT T2.W, PS, T1.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.W, T1.W,
+; EG-NEXT:     SUB_INT T2.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT * T3.W, T0.X, literal.x,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T0.Y, PS, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T0.W, T0.W, T1.W,
+; EG-NEXT:     CNDE_INT T0.X, T3.W, 0.0, PV.W,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptoui float %x to i64
   store i64 %conv, i64 addrspace(1)* %out
   ret void
 }
 
-; FUNC: {{^}}fp_to_uint_v2f32_to_v2i64:
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
-; GCN: s_endpgm
 define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x float> %x) {
+; SI-LABEL: fp_to_uint_v2f32_to_v2i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_movk_i32 s12, 0xff6a
+; SI-NEXT:    s_mov_b32 s8, 0x7fffff
+; SI-NEXT:    s_mov_b32 s13, 0x800000
+; SI-NEXT:    s_mov_b32 s1, 0
+; SI-NEXT:    s_movk_i32 s14, 0x96
+; SI-NEXT:    s_movk_i32 s15, 0xff81
+; SI-NEXT:    v_mov_b32_e32 v6, 0x5f000000
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_brev_b32 s16, 1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s9, s3, 0x80017
+; SI-NEXT:    s_and_b32 s0, s3, s8
+; SI-NEXT:    s_ashr_i32 s17, s3, 31
+; SI-NEXT:    v_sub_f32_e32 v0, s3, v6
+; SI-NEXT:    s_bfe_u32 s10, s2, 0x80017
+; SI-NEXT:    s_and_b32 s18, s2, s8
+; SI-NEXT:    s_ashr_i32 s19, s2, 31
+; SI-NEXT:    v_sub_f32_e32 v2, s2, v6
+; SI-NEXT:    s_add_i32 s11, s9, s12
+; SI-NEXT:    s_or_b32 s0, s0, s13
+; SI-NEXT:    s_sub_i32 s20, s14, s9
+; SI-NEXT:    s_add_i32 s21, s9, s15
+; SI-NEXT:    s_ashr_i32 s22, s17, 31
+; SI-NEXT:    v_bfe_u32 v3, v0, 23, 8
+; SI-NEXT:    v_and_b32_e32 v4, s8, v0
+; SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v0
+; SI-NEXT:    s_add_i32 s23, s10, s12
+; SI-NEXT:    s_sub_i32 s24, s14, s10
+; SI-NEXT:    s_add_i32 s25, s10, s15
+; SI-NEXT:    s_ashr_i32 s26, s19, 31
+; SI-NEXT:    v_bfe_u32 v5, v2, 23, 8
+; SI-NEXT:    v_and_b32_e32 v8, s8, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v2
+; SI-NEXT:    s_lshl_b64 s[8:9], s[0:1], s11
+; SI-NEXT:    s_lshr_b64 s[10:11], s[0:1], s20
+; SI-NEXT:    v_mov_b32_e32 v10, s22
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v3
+; SI-NEXT:    v_or_b32_e32 v0, s13, v4
+; SI-NEXT:    v_sub_i32_e32 v4, vcc, s14, v3
+; SI-NEXT:    v_add_i32_e32 v11, vcc, s15, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v7
+; SI-NEXT:    s_or_b32 s0, s18, s13
+; SI-NEXT:    v_mov_b32_e32 v13, s26
+; SI-NEXT:    v_add_i32_e32 v14, vcc, s12, v5
+; SI-NEXT:    v_sub_i32_e32 v15, vcc, s14, v5
+; SI-NEXT:    v_add_i32_e32 v16, vcc, s15, v5
+; SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v9
+; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v5, s9
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s21, 23
+; SI-NEXT:    v_cndmask_b32_e32 v18, v3, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v19, s10
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; SI-NEXT:    v_lshr_b64 v[4:5], v[0:1], v4
+; SI-NEXT:    v_or_b32_e32 v0, s13, v8
+; SI-NEXT:    v_mov_b32_e32 v8, s8
+; SI-NEXT:    s_lshl_b64 s[8:9], s[0:1], s23
+; SI-NEXT:    s_lshr_b64 s[0:1], s[0:1], s24
+; SI-NEXT:    v_cndmask_b32_e32 v8, v19, v8, vcc
+; SI-NEXT:    v_xor_b32_e32 v18, s22, v18
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v11
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v2, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s25, 23
+; SI-NEXT:    v_cndmask_b32_e32 v19, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v20, s0
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v14
+; SI-NEXT:    v_lshr_b64 v[0:1], v[0:1], v15
+; SI-NEXT:    v_mov_b32_e32 v14, s8
+; SI-NEXT:    v_xor_b32_e32 v8, s17, v8
+; SI-NEXT:    v_xor_b32_e32 v4, v4, v7
+; SI-NEXT:    v_xor_b32_e32 v5, v5, v12
+; SI-NEXT:    v_cndmask_b32_e32 v14, v20, v14, vcc
+; SI-NEXT:    v_xor_b32_e32 v15, s26, v19
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v16
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, s17, v8
+; SI-NEXT:    v_subb_u32_e32 v3, vcc, v18, v10, vcc
+; SI-NEXT:    v_sub_i32_e32 v4, vcc, v4, v7
+; SI-NEXT:    v_subb_u32_e32 v5, vcc, v5, v12, vcc
+; SI-NEXT:    v_xor_b32_e32 v7, s19, v14
+; SI-NEXT:    v_xor_b32_e32 v0, v0, v9
+; SI-NEXT:    v_xor_b32_e32 v1, v1, v17
+; SI-NEXT:    v_cmp_lt_i32_e64 s[8:9], s21, 0
+; SI-NEXT:    v_cndmask_b32_e64 v2, v2, 0, s[8:9]
+; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v11
+; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; SI-NEXT:    v_subrev_i32_e64 v7, s[0:1], s19, v7
+; SI-NEXT:    v_subb_u32_e64 v8, s[0:1], v15, v13, s[0:1]
+; SI-NEXT:    v_sub_i32_e64 v0, s[0:1], v0, v9
+; SI-NEXT:    v_subb_u32_e64 v1, s[0:1], v1, v17, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[8:9]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; SI-NEXT:    v_cmp_lt_f32_e32 vcc, s3, v6
+; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[8:9], s25, 0
+; SI-NEXT:    v_cndmask_b32_e64 v4, v7, 0, s[8:9]
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v16
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; SI-NEXT:    v_xor_b32_e32 v5, s16, v5
+; SI-NEXT:    v_cndmask_b32_e64 v7, v8, 0, s[8:9]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, s[0:1]
+; SI-NEXT:    v_cmp_lt_f32_e64 s[0:1], s2, v6
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; SI-NEXT:    v_xor_b32_e32 v1, s16, v1
+; SI-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s[0:1]
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_v2f32_to_v2i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s17, 0x7fffff
+; VI-NEXT:    s_movk_i32 s16, 0xff6a
+; VI-NEXT:    s_mov_b32 s18, 0x800000
+; VI-NEXT:    s_movk_i32 s19, 0x96
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s12, s5, 0x80017
+; VI-NEXT:    s_and_b32 s1, s5, s17
+; VI-NEXT:    s_add_i32 s0, s12, s16
+; VI-NEXT:    s_or_b32 s6, s1, s18
+; VI-NEXT:    s_mov_b32 s7, 0
+; VI-NEXT:    s_sub_i32 s2, s19, s12
+; VI-NEXT:    s_movk_i32 s20, 0xff81
+; VI-NEXT:    s_lshl_b64 s[0:1], s[6:7], s0
+; VI-NEXT:    s_lshr_b64 s[2:3], s[6:7], s2
+; VI-NEXT:    s_add_i32 s12, s12, s20
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s12, 23
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    s_ashr_i32 s0, s5, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    v_mov_b32_e32 v8, 0x5f000000
+; VI-NEXT:    v_sub_f32_e32 v9, s5, v8
+; VI-NEXT:    v_xor_b32_e32 v0, s1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s0, v1
+; VI-NEXT:    v_subb_u32_e32 v6, vcc, v0, v2, vcc
+; VI-NEXT:    v_bfe_u32 v10, v9, 23, 8
+; VI-NEXT:    v_and_b32_e32 v0, s17, v9
+; VI-NEXT:    v_cmp_lt_i32_e64 s[12:13], s12, 0
+; VI-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[12:13]
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s16, v10
+; VI-NEXT:    v_or_b32_e32 v0, s18, v0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    v_sub_u32_e32 v4, vcc, s19, v10
+; VI-NEXT:    v_lshlrev_b64 v[2:3], v2, v[0:1]
+; VI-NEXT:    v_lshrrev_b64 v[4:5], v4, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s20, v10
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v0
+; VI-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v4, 31, v9
+; VI-NEXT:    s_and_b32 s6, s4, s17
+; VI-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; VI-NEXT:    v_xor_b32_e32 v2, v2, v4
+; VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; VI-NEXT:    v_cmp_lt_f32_e64 s[2:3], s5, v8
+; VI-NEXT:    s_bfe_u32 s5, s4, 0x80017
+; VI-NEXT:    v_xor_b32_e32 v3, v3, v5
+; VI-NEXT:    v_sub_u32_e32 v2, vcc, v2, v4
+; VI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v0
+; VI-NEXT:    s_add_i32 s14, s5, s16
+; VI-NEXT:    s_or_b32 s6, s6, s18
+; VI-NEXT:    s_sub_i32 s21, s19, s5
+; VI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[0:1]
+; VI-NEXT:    s_lshl_b64 s[14:15], s[6:7], s14
+; VI-NEXT:    v_subb_u32_e32 v5, vcc, v3, v5, vcc
+; VI-NEXT:    s_add_i32 s5, s5, s20
+; VI-NEXT:    s_lshr_b64 s[6:7], s[6:7], s21
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, v7, s[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s15
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s5, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s6
+; VI-NEXT:    v_mov_b32_e32 v4, s14
+; VI-NEXT:    s_ashr_i32 s6, s4, 31
+; VI-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; VI-NEXT:    s_ashr_i32 s7, s6, 31
+; VI-NEXT:    v_xor_b32_e32 v3, s6, v3
+; VI-NEXT:    v_sub_f32_e32 v10, s4, v8
+; VI-NEXT:    v_xor_b32_e32 v0, s7, v0
+; VI-NEXT:    v_mov_b32_e32 v4, s7
+; VI-NEXT:    v_subrev_u32_e32 v3, vcc, s6, v3
+; VI-NEXT:    v_cmp_lt_i32_e64 s[6:7], s5, 0
+; VI-NEXT:    v_subb_u32_e32 v7, vcc, v0, v4, vcc
+; VI-NEXT:    v_and_b32_e32 v0, s17, v10
+; VI-NEXT:    v_bfe_u32 v11, v10, 23, 8
+; VI-NEXT:    v_cndmask_b32_e64 v9, v3, 0, s[6:7]
+; VI-NEXT:    v_add_u32_e32 v3, vcc, s16, v11
+; VI-NEXT:    v_or_b32_e32 v0, s18, v0
+; VI-NEXT:    v_sub_u32_e32 v12, vcc, s19, v11
+; VI-NEXT:    v_add_u32_e32 v11, vcc, s20, v11
+; VI-NEXT:    v_lshlrev_b64 v[3:4], v3, v[0:1]
+; VI-NEXT:    v_lshrrev_b64 v[0:1], v12, v[0:1]
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v11
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v10
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, v0, v3
+; VI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v3
+; VI-NEXT:    v_xor_b32_e32 v1, v1, v4
+; VI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v11
+; VI-NEXT:    v_cndmask_b32_e64 v4, v5, 0, s[0:1]
+; VI-NEXT:    s_brev_b32 s0, 1
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v3, v6, 0, s[12:13]
+; VI-NEXT:    v_xor_b32_e32 v4, s0, v4
+; VI-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_cmp_lt_f32_e64 s[4:5], s4, v8
+; VI-NEXT:    v_cndmask_b32_e64 v4, v7, 0, s[6:7]
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    s_mov_b32 s11, 0xf000
+; VI-NEXT:    s_mov_b32 s10, -1
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, v9, s[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[4:5]
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 79, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T1.W, KC0[2].W, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T2.W, KC0[2].W, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     AND_INT T0.Y, KC0[3].X, literal.x,
+; EG-NEXT:     ADD_INT T0.Z, PV.W, literal.y,
+; EG-NEXT:     OR_INT T2.W, PS, literal.z,
+; EG-NEXT:     SUB_INT * T3.W, literal.w, PV.W,
+; EG-NEXT:    8388607(1.175494e-38), -150(nan)
+; EG-NEXT:    8388608(1.175494e-38), 150(2.101948e-43)
+; EG-NEXT:     BFE_UINT T0.X, KC0[3].X, literal.x, T0.W,
+; EG-NEXT:     SETGT_UINT T1.Y, PS, literal.y,
+; EG-NEXT:     LSHR T1.Z, PV.W, PS,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.y,
+; EG-NEXT:     LSHL * T3.W, PV.W, PV.Z,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T1.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T1.Y, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     ADD_INT T0.Z, PV.X, literal.x,
+; EG-NEXT:     OR_INT T3.W, T0.Y, literal.y,
+; EG-NEXT:     SUB_INT * T4.W, literal.z, PV.X,
+; EG-NEXT:    -150(nan), 8388608(1.175494e-38)
+; EG-NEXT:    150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, literal.x, T0.X,
+; EG-NEXT:     SETGT_UINT T0.Y, PS, literal.y,
+; EG-NEXT:     LSHR T1.Z, PV.W, PS,
+; EG-NEXT:     SETGT_UINT T4.W, PV.Z, literal.y,
+; EG-NEXT:     LSHL * T5.W, PV.W, PV.Z,
+; EG-NEXT:    181(2.536350e-43), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T3.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Y, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     ADD_INT T0.Z, T0.X, literal.x,
+; EG-NEXT:     LSHR T5.W, T3.W, PV.X,
+; EG-NEXT:     SUB_INT * T6.W, literal.y, T1.W,
+; EG-NEXT:    -182(nan), 181(2.536350e-43)
+; EG-NEXT:     ADD_INT T2.X, T1.W, literal.x,
+; EG-NEXT:     LSHR T2.Y, T2.W, PS, BS:VEC_120/SCL_212
+; EG-NEXT:     ADD_INT T1.Z, T0.X, literal.y,
+; EG-NEXT:     LSHR T5.W, PV.W, 1,
+; EG-NEXT:     LSHL * T3.W, T3.W, PV.Z,
+; EG-NEXT:    -182(nan), -127(nan)
+; EG-NEXT:     CNDE_INT T0.X, T4.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT T3.Y, PV.Z, literal.x,
+; EG-NEXT:     ADD_INT T0.Z, T1.W, literal.y, BS:VEC_120/SCL_212
+; EG-NEXT:     LSHR T1.W, PV.Y, 1,
+; EG-NEXT:     LSHL * T2.W, T2.W, PV.X,
+; EG-NEXT:    23(3.222986e-44), -127(nan)
+; EG-NEXT:     CNDE_INT T2.X, T0.W, PV.W, PS,
+; EG-NEXT:     SETGT_INT T2.Y, PV.Z, literal.x,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Y, 0.0, PV.X,
+; EG-NEXT:     CNDE_INT T0.W, PV.Y, T0.Y, T3.X,
+; EG-NEXT:     ASHR * T1.W, KC0[3].X, literal.y,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     XOR_INT T0.X, PV.W, PS,
+; EG-NEXT:     XOR_INT T0.Y, PV.Z, PS,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Y, 0.0, PV.X,
+; EG-NEXT:     CNDE_INT T0.W, PV.Y, T1.Y, T1.X,
+; EG-NEXT:     ASHR * T2.W, KC0[2].W, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T1.Y, PV.W, PS,
+; EG-NEXT:     XOR_INT T2.Z, PV.Z, PS,
+; EG-NEXT:     SUB_INT T0.W, PV.Y, T1.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.X, T1.W,
+; EG-NEXT:     SUB_INT T0.Y, PV.W, PS,
+; EG-NEXT:     SETGT_INT T1.Z, T1.Z, literal.x,
+; EG-NEXT:     SUB_INT T0.W, PV.Z, T2.W,
+; EG-NEXT:     SUBB_UINT * T3.W, PV.Y, T2.W,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.Z, PV.W, PS,
+; EG-NEXT:     SETGT_INT T0.W, T0.Z, literal.x,
+; EG-NEXT:     CNDE_INT * T3.W, PV.Z, 0.0, PV.Y, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T3.Y, PV.W, 0.0, PV.Z,
+; EG-NEXT:     SUB_INT * T1.W, T0.X, T1.W,
+; EG-NEXT:     CNDE_INT T3.Z, T1.Z, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T1.W, T1.Y, T2.W,
+; EG-NEXT:     CNDE_INT T3.X, T0.W, 0.0, PV.W,
+; EG-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptoui <2 x float> %x to <2 x i64>
   store <2 x i64> %conv, <2 x i64> addrspace(1)* %out
   ret void
 }
 
-; FUNC: {{^}}fp_to_uint_v4f32_to_v4i64:
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: AND_INT
-; EG-DAG: LSHR
-; EG-DAG: SUB_INT
-; EG-DAG: AND_INT
-; EG-DAG: ASHR
-; EG-DAG: AND_INT
-; EG-DAG: OR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: LSHL
-; EG-DAG: LSHL
-; EG-DAG: SUB_INT
-; EG-DAG: LSHR
-; EG-DAG: LSHR
-; EG-DAG: SETGT_UINT
-; EG-DAG: SETGT_INT
-; EG-DAG: XOR_INT
-; EG-DAG: XOR_INT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT
-; EG-DAG: CNDE_INT
-; EG-DAG: CNDE_INT
-
-; GCN: s_endpgm
 define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
+; SI-LABEL: fp_to_uint_v4f32_to_v4i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; SI-NEXT:    s_movk_i32 s6, 0xff6a
+; SI-NEXT:    s_mov_b32 s7, 0x7fffff
+; SI-NEXT:    s_mov_b32 s18, 0x800000
+; SI-NEXT:    s_mov_b32 s13, 0
+; SI-NEXT:    s_movk_i32 s19, 0x96
+; SI-NEXT:    s_movk_i32 s20, 0xff81
+; SI-NEXT:    v_mov_b32_e32 v7, 0x5f000000
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s2, s9, 0x80017
+; SI-NEXT:    s_and_b32 s3, s9, s7
+; SI-NEXT:    v_sub_f32_e32 v6, s9, v7
+; SI-NEXT:    s_bfe_u32 s4, s8, 0x80017
+; SI-NEXT:    s_and_b32 s14, s8, s7
+; SI-NEXT:    v_sub_f32_e32 v8, s8, v7
+; SI-NEXT:    v_sub_f32_e32 v9, s11, v7
+; SI-NEXT:    v_sub_f32_e32 v10, s10, v7
+; SI-NEXT:    s_add_i32 s5, s2, s6
+; SI-NEXT:    s_or_b32 s12, s3, s18
+; SI-NEXT:    s_sub_i32 s15, s19, s2
+; SI-NEXT:    s_add_i32 s21, s2, s20
+; SI-NEXT:    v_bfe_u32 v2, v6, 23, 8
+; SI-NEXT:    v_and_b32_e32 v0, s7, v6
+; SI-NEXT:    s_add_i32 s16, s4, s6
+; SI-NEXT:    s_sub_i32 s17, s19, s4
+; SI-NEXT:    s_add_i32 s22, s4, s20
+; SI-NEXT:    v_bfe_u32 v3, v8, 23, 8
+; SI-NEXT:    v_and_b32_e32 v11, s7, v8
+; SI-NEXT:    v_bfe_u32 v12, v9, 23, 8
+; SI-NEXT:    v_and_b32_e32 v13, s7, v9
+; SI-NEXT:    v_bfe_u32 v14, v10, 23, 8
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v2
+; SI-NEXT:    v_or_b32_e32 v0, s18, v0
+; SI-NEXT:    v_sub_i32_e32 v5, vcc, s19, v2
+; SI-NEXT:    v_add_i32_e32 v15, vcc, s20, v2
+; SI-NEXT:    v_add_i32_e32 v16, vcc, s6, v3
+; SI-NEXT:    v_sub_i32_e32 v17, vcc, s19, v3
+; SI-NEXT:    v_add_i32_e32 v18, vcc, s20, v3
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v4
+; SI-NEXT:    v_lshr_b64 v[4:5], v[0:1], v5
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v15
+; SI-NEXT:    v_cndmask_b32_e32 v19, v5, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v20, v4, v2, vcc
+; SI-NEXT:    v_add_i32_e32 v21, vcc, s6, v12
+; SI-NEXT:    v_sub_i32_e32 v22, vcc, s19, v12
+; SI-NEXT:    v_add_i32_e32 v12, vcc, s20, v12
+; SI-NEXT:    v_or_b32_e32 v0, s18, v11
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v16
+; SI-NEXT:    v_lshr_b64 v[4:5], v[0:1], v17
+; SI-NEXT:    v_or_b32_e32 v0, s18, v13
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v18
+; SI-NEXT:    v_cndmask_b32_e32 v11, v5, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v13, v4, v2, vcc
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v21
+; SI-NEXT:    v_lshr_b64 v[4:5], v[0:1], v22
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v12
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v2, vcc
+; SI-NEXT:    v_and_b32_e32 v0, s7, v10
+; SI-NEXT:    s_lshl_b64 s[2:3], s[12:13], s5
+; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s15
+; SI-NEXT:    s_or_b32 s12, s14, s18
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v14
+; SI-NEXT:    v_sub_i32_e32 v16, vcc, s19, v14
+; SI-NEXT:    v_add_i32_e32 v14, vcc, s20, v14
+; SI-NEXT:    v_or_b32_e32 v0, s18, v0
+; SI-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; SI-NEXT:    v_lshr_b64 v[0:1], v[0:1], v16
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v14
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v16, v0, v2, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s21, 23
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s4
+; SI-NEXT:    v_mov_b32_e32 v3, s2
+; SI-NEXT:    s_lshl_b64 s[2:3], s[12:13], s16
+; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s17
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_mov_b32_e32 v17, s3
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s22, 23
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v17, vcc
+; SI-NEXT:    v_mov_b32_e32 v17, s4
+; SI-NEXT:    v_mov_b32_e32 v21, s2
+; SI-NEXT:    v_cndmask_b32_e32 v17, v17, v21, vcc
+; SI-NEXT:    s_bfe_u32 s2, s11, 0x80017
+; SI-NEXT:    s_and_b32 s3, s11, s7
+; SI-NEXT:    s_add_i32 s4, s2, s6
+; SI-NEXT:    s_sub_i32 s5, s19, s2
+; SI-NEXT:    s_or_b32 s12, s3, s18
+; SI-NEXT:    s_lshl_b64 s[14:15], s[12:13], s4
+; SI-NEXT:    s_lshr_b64 s[16:17], s[12:13], s5
+; SI-NEXT:    s_add_i32 s24, s2, s20
+; SI-NEXT:    v_mov_b32_e32 v21, s17
+; SI-NEXT:    v_mov_b32_e32 v22, s15
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s24, 23
+; SI-NEXT:    v_cndmask_b32_e32 v21, v21, v22, vcc
+; SI-NEXT:    s_ashr_i32 s2, s9, 31
+; SI-NEXT:    s_ashr_i32 s3, s2, 31
+; SI-NEXT:    v_xor_b32_e32 v0, s3, v0
+; SI-NEXT:    v_xor_b32_e32 v2, s2, v2
+; SI-NEXT:    v_mov_b32_e32 v22, s3
+; SI-NEXT:    v_subrev_i32_e64 v2, s[2:3], s2, v2
+; SI-NEXT:    v_subb_u32_e64 v22, s[2:3], v0, v22, s[2:3]
+; SI-NEXT:    v_ashrrev_i32_e32 v0, 31, v6
+; SI-NEXT:    v_xor_b32_e32 v6, v20, v0
+; SI-NEXT:    v_ashrrev_i32_e32 v20, 31, v0
+; SI-NEXT:    v_xor_b32_e32 v19, v19, v20
+; SI-NEXT:    v_sub_i32_e64 v0, s[2:3], v6, v0
+; SI-NEXT:    v_subb_u32_e64 v6, s[2:3], v19, v20, s[2:3]
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], 0, v15
+; SI-NEXT:    s_ashr_i32 s4, s8, 31
+; SI-NEXT:    s_ashr_i32 s5, s4, 31
+; SI-NEXT:    v_xor_b32_e32 v3, s5, v3
+; SI-NEXT:    v_xor_b32_e32 v15, s4, v17
+; SI-NEXT:    v_mov_b32_e32 v17, s5
+; SI-NEXT:    v_subrev_i32_e64 v15, s[4:5], s4, v15
+; SI-NEXT:    v_subb_u32_e64 v17, s[4:5], v3, v17, s[4:5]
+; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v8
+; SI-NEXT:    v_xor_b32_e32 v8, v13, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
+; SI-NEXT:    v_xor_b32_e32 v11, v11, v13
+; SI-NEXT:    v_sub_i32_e64 v3, s[4:5], v8, v3
+; SI-NEXT:    s_bfe_u32 s12, s10, 0x80017
+; SI-NEXT:    s_and_b32 s7, s10, s7
+; SI-NEXT:    s_add_i32 s15, s12, s6
+; SI-NEXT:    s_sub_i32 s23, s19, s12
+; SI-NEXT:    s_add_i32 s25, s12, s20
+; SI-NEXT:    s_or_b32 s12, s7, s18
+; SI-NEXT:    v_cmp_lt_i32_e64 s[18:19], s21, 0
+; SI-NEXT:    v_cndmask_b32_e64 v2, v2, 0, s[18:19]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
+; SI-NEXT:    v_subb_u32_e64 v8, s[4:5], v11, v13, s[4:5]
+; SI-NEXT:    v_cmp_gt_i32_e64 s[4:5], 0, v18
+; SI-NEXT:    v_cmp_lt_f32_e64 s[6:7], s9, v7
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, v2, s[6:7]
+; SI-NEXT:    v_cmp_lt_i32_e64 s[20:21], s22, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, v15, 0, s[20:21]
+; SI-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[4:5]
+; SI-NEXT:    v_cmp_lt_f32_e64 s[8:9], s8, v7
+; SI-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v3, s16
+; SI-NEXT:    v_mov_b32_e32 v11, s14
+; SI-NEXT:    s_lshl_b64 s[16:17], s[12:13], s15
+; SI-NEXT:    s_lshr_b64 s[22:23], s[12:13], s23
+; SI-NEXT:    v_cndmask_b32_e32 v11, v3, v11, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s23
+; SI-NEXT:    v_mov_b32_e32 v13, s17
+; SI-NEXT:    v_cmp_gt_i32_e64 vcc, s25, 23
+; SI-NEXT:    v_cndmask_b32_e32 v13, v3, v13, vcc
+; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s15, 0xf000
+; SI-NEXT:    s_mov_b32 s14, -1
+; SI-NEXT:    s_brev_b32 s17, 1
+; SI-NEXT:    s_ashr_i32 s0, s11, 31
+; SI-NEXT:    s_ashr_i32 s1, s10, 31
+; SI-NEXT:    s_ashr_i32 s23, s0, 31
+; SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v9
+; SI-NEXT:    s_ashr_i32 s26, s1, 31
+; SI-NEXT:    v_ashrrev_i32_e32 v10, 31, v10
+; SI-NEXT:    v_cndmask_b32_e64 v3, v22, 0, s[18:19]
+; SI-NEXT:    v_cndmask_b32_e64 v6, v6, 0, s[2:3]
+; SI-NEXT:    v_xor_b32_e32 v6, s17, v6
+; SI-NEXT:    v_cndmask_b32_e64 v3, v6, v3, s[6:7]
+; SI-NEXT:    v_mov_b32_e32 v6, s22
+; SI-NEXT:    v_mov_b32_e32 v15, s16
+; SI-NEXT:    v_cndmask_b32_e32 v6, v6, v15, vcc
+; SI-NEXT:    v_mov_b32_e32 v15, s23
+; SI-NEXT:    v_xor_b32_e32 v18, s23, v21
+; SI-NEXT:    v_xor_b32_e32 v11, s0, v11
+; SI-NEXT:    v_subrev_i32_e32 v11, vcc, s0, v11
+; SI-NEXT:    v_subb_u32_e32 v15, vcc, v18, v15, vcc
+; SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v9
+; SI-NEXT:    v_xor_b32_e32 v4, v4, v9
+; SI-NEXT:    v_xor_b32_e32 v5, v5, v18
+; SI-NEXT:    v_sub_i32_e32 v4, vcc, v4, v9
+; SI-NEXT:    v_mov_b32_e32 v9, s26
+; SI-NEXT:    v_subb_u32_e32 v5, vcc, v5, v18, vcc
+; SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v10
+; SI-NEXT:    v_xor_b32_e32 v13, s26, v13
+; SI-NEXT:    v_cndmask_b32_e64 v17, v17, 0, s[20:21]
+; SI-NEXT:    v_cndmask_b32_e64 v8, v8, 0, s[4:5]
+; SI-NEXT:    v_xor_b32_e32 v6, s1, v6
+; SI-NEXT:    v_xor_b32_e32 v16, v16, v10
+; SI-NEXT:    v_xor_b32_e32 v1, v1, v18
+; SI-NEXT:    v_xor_b32_e32 v8, s17, v8
+; SI-NEXT:    v_cmp_lt_i32_e64 s[2:3], s24, 0
+; SI-NEXT:    v_cndmask_b32_e64 v11, v11, 0, s[2:3]
+; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v12
+; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; SI-NEXT:    v_subrev_i32_e64 v12, s[0:1], s1, v6
+; SI-NEXT:    v_subb_u32_e64 v9, s[0:1], v13, v9, s[0:1]
+; SI-NEXT:    v_sub_i32_e64 v10, s[0:1], v16, v10
+; SI-NEXT:    v_subb_u32_e64 v13, s[0:1], v1, v18, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v15, v15, 0, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v1, v8, v17, s[8:9]
+; SI-NEXT:    v_cmp_lt_f32_e32 vcc, s11, v7
+; SI-NEXT:    v_cndmask_b32_e32 v6, v4, v11, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[2:3], s25, 0
+; SI-NEXT:    v_cndmask_b32_e64 v4, v12, 0, s[2:3]
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v14
+; SI-NEXT:    v_cndmask_b32_e64 v8, v10, 0, s[0:1]
+; SI-NEXT:    v_xor_b32_e32 v5, s17, v5
+; SI-NEXT:    v_cndmask_b32_e64 v9, v9, 0, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v10, v13, 0, s[0:1]
+; SI-NEXT:    v_cmp_lt_f32_e64 s[0:1], s10, v7
+; SI-NEXT:    v_cndmask_b32_e64 v4, v8, v4, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e32 v7, v5, v15, vcc
+; SI-NEXT:    v_xor_b32_e32 v5, s17, v10
+; SI-NEXT:    v_cndmask_b32_e64 v5, v5, v9, s[0:1]
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[12:15], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_v4f32_to_v4i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
+; VI-NEXT:    s_mov_b32 s21, 0x7fffff
+; VI-NEXT:    s_movk_i32 s20, 0xff6a
+; VI-NEXT:    s_mov_b32 s22, 0x800000
+; VI-NEXT:    s_movk_i32 s23, 0x96
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_u32 s14, s5, 0x80017
+; VI-NEXT:    s_and_b32 s1, s5, s21
+; VI-NEXT:    s_add_i32 s0, s14, s20
+; VI-NEXT:    s_or_b32 s12, s1, s22
+; VI-NEXT:    s_mov_b32 s13, 0
+; VI-NEXT:    s_sub_i32 s2, s23, s14
+; VI-NEXT:    s_movk_i32 s24, 0xff81
+; VI-NEXT:    s_lshl_b64 s[0:1], s[12:13], s0
+; VI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s2
+; VI-NEXT:    s_add_i32 s14, s14, s24
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s14, 23
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    s_ashr_i32 s0, s5, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; VI-NEXT:    v_mov_b32_e32 v10, 0x5f000000
+; VI-NEXT:    v_xor_b32_e32 v1, s0, v1
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_sub_f32_e32 v8, s5, v10
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s0, v1
+; VI-NEXT:    v_cmp_lt_i32_e64 s[14:15], s14, 0
+; VI-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[14:15]
+; VI-NEXT:    v_and_b32_e32 v1, s21, v8
+; VI-NEXT:    v_xor_b32_e32 v0, s1, v0
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_bfe_u32 v9, v8, 23, 8
+; VI-NEXT:    v_subb_u32_e32 v6, vcc, v0, v2, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s20, v9
+; VI-NEXT:    v_or_b32_e32 v4, s22, v1
+; VI-NEXT:    v_mov_b32_e32 v5, 0
+; VI-NEXT:    v_sub_u32_e32 v2, vcc, s23, v9
+; VI-NEXT:    v_lshlrev_b64 v[0:1], v0, v[4:5]
+; VI-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s24, v9
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v4
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v2, 31, v8
+; VI-NEXT:    s_and_b32 s12, s4, s21
+; VI-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, v0, v2
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; VI-NEXT:    v_cmp_lt_f32_e64 s[2:3], s5, v10
+; VI-NEXT:    s_bfe_u32 s5, s4, 0x80017
+; VI-NEXT:    v_xor_b32_e32 v1, v1, v3
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v4
+; VI-NEXT:    s_add_i32 s16, s5, s20
+; VI-NEXT:    s_or_b32 s12, s12, s22
+; VI-NEXT:    s_sub_i32 s18, s23, s5
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; VI-NEXT:    v_subb_u32_e32 v8, vcc, v1, v3, vcc
+; VI-NEXT:    s_lshl_b64 s[16:17], s[12:13], s16
+; VI-NEXT:    s_lshr_b64 s[18:19], s[12:13], s18
+; VI-NEXT:    s_add_i32 s5, s5, s24
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, v7, s[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s19
+; VI-NEXT:    v_mov_b32_e32 v1, s17
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s5, 23
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s18
+; VI-NEXT:    v_mov_b32_e32 v3, s16
+; VI-NEXT:    s_ashr_i32 s12, s4, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; VI-NEXT:    s_ashr_i32 s16, s12, 31
+; VI-NEXT:    v_xor_b32_e32 v1, s12, v1
+; VI-NEXT:    v_sub_f32_e32 v11, s4, v10
+; VI-NEXT:    v_xor_b32_e32 v0, s16, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s16
+; VI-NEXT:    v_subrev_u32_e32 v1, vcc, s12, v1
+; VI-NEXT:    v_cmp_lt_i32_e64 s[16:17], s5, 0
+; VI-NEXT:    v_cndmask_b32_e64 v9, v1, 0, s[16:17]
+; VI-NEXT:    v_and_b32_e32 v1, s21, v11
+; VI-NEXT:    v_bfe_u32 v12, v11, 23, 8
+; VI-NEXT:    v_subb_u32_e32 v7, vcc, v0, v3, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s20, v12
+; VI-NEXT:    v_or_b32_e32 v4, s22, v1
+; VI-NEXT:    v_sub_u32_e32 v3, vcc, s23, v12
+; VI-NEXT:    v_add_u32_e32 v12, vcc, s24, v12
+; VI-NEXT:    v_lshlrev_b64 v[0:1], v0, v[4:5]
+; VI-NEXT:    v_lshrrev_b64 v[3:4], v3, v[4:5]
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v12
+; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v11
+; VI-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, v0, v3
+; VI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v3
+; VI-NEXT:    v_xor_b32_e32 v1, v1, v4
+; VI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v12
+; VI-NEXT:    v_cndmask_b32_e64 v4, v8, 0, s[0:1]
+; VI-NEXT:    s_brev_b32 s18, 1
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v3, v6, 0, s[14:15]
+; VI-NEXT:    v_xor_b32_e32 v4, s18, v4
+; VI-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_cmp_lt_f32_e64 s[4:5], s4, v10
+; VI-NEXT:    v_cndmask_b32_e64 v4, v7, 0, s[16:17]
+; VI-NEXT:    v_xor_b32_e32 v1, s18, v1
+; VI-NEXT:    s_and_b32 s1, s7, s21
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, v9, s[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[4:5]
+; VI-NEXT:    s_bfe_u32 s4, s7, 0x80017
+; VI-NEXT:    s_add_i32 s0, s4, s20
+; VI-NEXT:    s_or_b32 s12, s1, s22
+; VI-NEXT:    s_sub_i32 s2, s23, s4
+; VI-NEXT:    s_lshl_b64 s[0:1], s[12:13], s0
+; VI-NEXT:    s_add_i32 s4, s4, s24
+; VI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s2
+; VI-NEXT:    v_mov_b32_e32 v4, s3
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s4, 23
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
+; VI-NEXT:    v_mov_b32_e32 v6, s2
+; VI-NEXT:    v_mov_b32_e32 v7, s0
+; VI-NEXT:    s_ashr_i32 s0, s7, 31
+; VI-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; VI-NEXT:    s_ashr_i32 s1, s0, 31
+; VI-NEXT:    v_xor_b32_e32 v6, s0, v6
+; VI-NEXT:    v_sub_f32_e32 v13, s7, v10
+; VI-NEXT:    v_xor_b32_e32 v4, s1, v4
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_subrev_u32_e32 v6, vcc, s0, v6
+; VI-NEXT:    v_subb_u32_e32 v11, vcc, v4, v7, vcc
+; VI-NEXT:    v_and_b32_e32 v4, s21, v13
+; VI-NEXT:    v_cmp_lt_i32_e64 s[14:15], s4, 0
+; VI-NEXT:    v_bfe_u32 v14, v13, 23, 8
+; VI-NEXT:    v_cndmask_b32_e64 v12, v6, 0, s[14:15]
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s20, v14
+; VI-NEXT:    v_or_b32_e32 v4, s22, v4
+; VI-NEXT:    v_sub_u32_e32 v8, vcc, s23, v14
+; VI-NEXT:    v_lshlrev_b64 v[6:7], v6, v[4:5]
+; VI-NEXT:    v_lshrrev_b64 v[8:9], v8, v[4:5]
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s24, v14
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v4
+; VI-NEXT:    v_cndmask_b32_e32 v6, v8, v6, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v8, 31, v13
+; VI-NEXT:    v_cndmask_b32_e32 v7, v9, v7, vcc
+; VI-NEXT:    v_xor_b32_e32 v6, v6, v8
+; VI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; VI-NEXT:    v_cmp_lt_f32_e64 s[2:3], s7, v10
+; VI-NEXT:    s_bfe_u32 s7, s6, 0x80017
+; VI-NEXT:    s_and_b32 s5, s6, s21
+; VI-NEXT:    v_xor_b32_e32 v7, v7, v9
+; VI-NEXT:    v_sub_u32_e32 v6, vcc, v6, v8
+; VI-NEXT:    v_cmp_gt_i32_e64 s[0:1], 0, v4
+; VI-NEXT:    s_add_i32 s4, s7, s20
+; VI-NEXT:    s_or_b32 s12, s5, s22
+; VI-NEXT:    s_sub_i32 s16, s23, s7
+; VI-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[0:1]
+; VI-NEXT:    s_lshl_b64 s[4:5], s[12:13], s4
+; VI-NEXT:    v_subb_u32_e32 v9, vcc, v7, v9, vcc
+; VI-NEXT:    s_add_i32 s7, s7, s24
+; VI-NEXT:    s_lshr_b64 s[12:13], s[12:13], s16
+; VI-NEXT:    v_cndmask_b32_e64 v6, v4, v12, s[2:3]
+; VI-NEXT:    v_mov_b32_e32 v4, s13
+; VI-NEXT:    v_mov_b32_e32 v7, s5
+; VI-NEXT:    v_cmp_gt_i32_e64 vcc, s7, 23
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v7, vcc
+; VI-NEXT:    v_mov_b32_e32 v7, s12
+; VI-NEXT:    v_mov_b32_e32 v8, s4
+; VI-NEXT:    s_ashr_i32 s4, s6, 31
+; VI-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
+; VI-NEXT:    s_ashr_i32 s5, s4, 31
+; VI-NEXT:    v_xor_b32_e32 v7, s4, v7
+; VI-NEXT:    v_sub_f32_e32 v14, s6, v10
+; VI-NEXT:    v_xor_b32_e32 v4, s5, v4
+; VI-NEXT:    v_mov_b32_e32 v8, s5
+; VI-NEXT:    v_subrev_u32_e32 v7, vcc, s4, v7
+; VI-NEXT:    v_subb_u32_e32 v12, vcc, v4, v8, vcc
+; VI-NEXT:    v_and_b32_e32 v4, s21, v14
+; VI-NEXT:    v_cmp_lt_i32_e64 s[12:13], s7, 0
+; VI-NEXT:    v_bfe_u32 v15, v14, 23, 8
+; VI-NEXT:    v_cndmask_b32_e64 v13, v7, 0, s[12:13]
+; VI-NEXT:    v_add_u32_e32 v7, vcc, s20, v15
+; VI-NEXT:    v_or_b32_e32 v4, s22, v4
+; VI-NEXT:    v_sub_u32_e32 v16, vcc, s23, v15
+; VI-NEXT:    v_add_u32_e32 v15, vcc, s24, v15
+; VI-NEXT:    v_lshlrev_b64 v[7:8], v7, v[4:5]
+; VI-NEXT:    v_lshrrev_b64 v[4:5], v16, v[4:5]
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, 23, v15
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v7, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v14
+; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
+; VI-NEXT:    v_xor_b32_e32 v4, v4, v7
+; VI-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; VI-NEXT:    v_sub_u32_e32 v4, vcc, v4, v7
+; VI-NEXT:    v_xor_b32_e32 v5, v5, v8
+; VI-NEXT:    v_subb_u32_e32 v5, vcc, v5, v8, vcc
+; VI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v15
+; VI-NEXT:    v_cndmask_b32_e64 v8, v9, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v7, v11, 0, s[14:15]
+; VI-NEXT:    v_xor_b32_e32 v8, s18, v8
+; VI-NEXT:    v_cndmask_b32_e64 v7, v8, v7, s[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; VI-NEXT:    v_cmp_lt_f32_e64 s[4:5], s6, v10
+; VI-NEXT:    v_cndmask_b32_e64 v8, v12, 0, s[12:13]
+; VI-NEXT:    v_xor_b32_e32 v5, s18, v5
+; VI-NEXT:    s_mov_b32 s11, 0xf000
+; VI-NEXT:    s_mov_b32 s10, -1
+; VI-NEXT:    v_cndmask_b32_e64 v4, v4, v13, s[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v5, v5, v8, s[4:5]
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_v4f32_to_v4i64:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 99, @6, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    ALU 64, @106, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 0
+; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T5.XYZW, T2.X, 1
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 6:
+; EG-NEXT:     MOV * T0.W, literal.x,
+; EG-NEXT:    8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T1.W, KC0[3].Z, literal.x, PV.W,
+; EG-NEXT:     AND_INT * T2.W, KC0[3].Z, literal.y,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:     ADD_INT T0.Z, PV.W, literal.x,
+; EG-NEXT:     SUB_INT T3.W, literal.y, PV.W,
+; EG-NEXT:     OR_INT * T2.W, PS, literal.z,
+; EG-NEXT:    -127(nan), 181(2.536350e-43)
+; EG-NEXT:    8388608(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T0.X, KC0[4].X, literal.x, T0.W,
+; EG-NEXT:     AND_INT T0.Y, KC0[4].X, literal.y,
+; EG-NEXT:     ADD_INT T1.Z, T1.W, literal.z,
+; EG-NEXT:     ADD_INT T4.W, T1.W, literal.w,
+; EG-NEXT:     LSHR * T3.W, PS, PV.W,
+; EG-NEXT:    23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT:    -150(nan), -182(nan)
+; EG-NEXT:     LSHR T1.X, PS, 1,
+; EG-NEXT:     LSHL T1.Y, T2.W, PV.W,
+; EG-NEXT:     SETGT_UINT T2.Z, PV.Z, literal.x,
+; EG-NEXT:     OR_INT T3.W, PV.Y, literal.y,
+; EG-NEXT:     ADD_INT * T4.W, PV.X, literal.z,
+; EG-NEXT:    31(4.344025e-44), 8388608(1.175494e-38)
+; EG-NEXT:    -150(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, literal.x, T1.W,
+; EG-NEXT:     SETGT_UINT T0.Y, PS, literal.y,
+; EG-NEXT:     LSHL T3.Z, PV.W, PS,
+; EG-NEXT:     CNDE_INT T1.W, PV.Z, PV.X, PV.Y,
+; EG-NEXT:     SETGT_INT * T4.W, T0.Z, literal.z,
+; EG-NEXT:    150(2.101948e-43), 31(4.344025e-44)
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Y, PS, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T3.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT:     SETGT_UINT T1.W, PV.X, literal.x,
+; EG-NEXT:     SUB_INT * T5.W, literal.y, T0.X,
+; EG-NEXT:    31(4.344025e-44), 181(2.536350e-43)
+; EG-NEXT:     LSHR T1.X, T2.W, T2.X,
+; EG-NEXT:     LSHL T2.Y, T2.W, T1.Z,
+; EG-NEXT:     SUB_INT T1.Z, literal.x, T0.X, BS:VEC_021/SCL_122
+; EG-NEXT:     ADD_INT T2.W, T0.X, literal.y,
+; EG-NEXT:     LSHR * T5.W, T3.W, PS,
+; EG-NEXT:    150(2.101948e-43), -182(nan)
+; EG-NEXT:     ADD_INT T0.X, T0.X, literal.x,
+; EG-NEXT:     LSHR T3.Y, PS, 1,
+; EG-NEXT:     LSHL T4.Z, T3.W, PV.W,
+; EG-NEXT:     SETGT_UINT T2.W, PV.Z, literal.y,
+; EG-NEXT:     LSHR * T3.W, T3.W, PV.Z,
+; EG-NEXT:    -127(nan), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T2.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T0.Y, T0.Y, PV.Y, PV.Z,
+; EG-NEXT:     SETGT_INT T1.Z, PV.X, literal.x,
+; EG-NEXT:     CNDE_INT T2.W, T2.Z, T2.Y, 0.0,
+; EG-NEXT:     CNDE_INT * T1.W, T1.W, T1.X, 0.0,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.X, T4.W, PS, PV.W,
+; EG-NEXT:     ASHR T2.Y, KC0[3].Z, literal.x,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT:     CNDE_INT T1.W, PV.Z, PV.X, T3.Z,
+; EG-NEXT:     ASHR * T2.W, KC0[4].X, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     BFE_UINT T2.X, KC0[3].Y, literal.x, T0.W,
+; EG-NEXT:     XOR_INT T0.Y, PV.W, PS,
+; EG-NEXT:     XOR_INT T1.Z, PV.Z, PS,
+; EG-NEXT:     XOR_INT T1.W, PV.X, PV.Y,
+; EG-NEXT:     XOR_INT * T3.W, T1.Y, PV.Y,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     AND_INT T1.X, KC0[3].Y, literal.x,
+; EG-NEXT:     SUB_INT T1.Y, PS, T2.Y,
+; EG-NEXT:     SUBB_UINT T2.Z, PV.W, T2.Y,
+; EG-NEXT:     SUB_INT T3.W, PV.Z, T2.W,
+; EG-NEXT:     SUBB_UINT * T4.W, PV.Y, T2.W,
+; EG-NEXT:    8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T3.Y, PV.W, PS,
+; EG-NEXT:     SUB_INT T1.Z, PV.Y, PV.Z,
+; EG-NEXT:     OR_INT T3.W, PV.X, literal.x,
+; EG-NEXT:     SUB_INT * T4.W, literal.y, T2.X,
+; EG-NEXT:    8388608(1.175494e-38), 150(2.101948e-43)
+; EG-NEXT:     SETGT_INT T1.X, T0.Z, literal.x,
+; EG-NEXT:     SETGT_UINT T1.Y, PS, literal.y,
+; EG-NEXT:     LSHR T0.Z, PV.W, PS,
+; EG-NEXT:     SUB_INT T4.W, literal.z, T2.X,
+; EG-NEXT:     AND_INT * T5.W, KC0[3].W, literal.w,
+; EG-NEXT:    -1(nan), 31(4.344025e-44)
+; EG-NEXT:    181(2.536350e-43), 8388607(1.175494e-38)
+; EG-NEXT:     OR_INT T3.X, PS, literal.x,
+; EG-NEXT:     ADD_INT T4.Y, T2.X, literal.y,
+; EG-NEXT:     ADD_INT T2.Z, T2.X, literal.z,
+; EG-NEXT:     BFE_UINT T0.W, KC0[3].W, literal.w, T0.W, BS:VEC_021/SCL_122
+; EG-NEXT:     LSHR * T4.W, T3.W, PV.W,
+; EG-NEXT:    8388608(1.175494e-38), -150(nan)
+; EG-NEXT:    -182(nan), 23(3.222986e-44)
+; EG-NEXT:     ADD_INT T4.X, PV.W, literal.x,
+; EG-NEXT:     ADD_INT T5.Y, T2.X, literal.y,
+; EG-NEXT:     LSHR T3.Z, PS, 1,
+; EG-NEXT:     LSHL T4.W, T3.W, PV.Z,
+; EG-NEXT:     SETGT_UINT * T5.W, PV.Y, literal.z,
+; EG-NEXT:    -150(nan), -127(nan)
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     LSHL T2.X, T3.W, T4.Y,
+; EG-NEXT:     CNDE_INT * T4.Y, PS, PV.Z, PV.W,
+; EG-NEXT:    ALU clause starting at 106:
+; EG-NEXT:     SETGT_INT T2.Z, T5.Y, literal.x,
+; EG-NEXT:     SETGT_UINT T3.W, T4.X, literal.y,
+; EG-NEXT:     LSHL * T4.W, T3.X, T4.X,
+; EG-NEXT:    23(3.222986e-44), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T4.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T4.Y, PV.Z, 0.0, T4.Y, BS:VEC_021/SCL_122
+; EG-NEXT:     SUB_INT T3.Z, literal.x, T0.W,
+; EG-NEXT:     CNDE_INT T4.W, T5.W, T2.X, 0.0,
+; EG-NEXT:     CNDE_INT * T5.W, T1.Y, T0.Z, 0.0,
+; EG-NEXT:    181(2.536350e-43), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T2.X, T2.Z, PS, PV.W,
+; EG-NEXT:     ASHR T1.Y, KC0[3].Y, literal.x,
+; EG-NEXT:     SUB_INT T0.Z, literal.y, T0.W,
+; EG-NEXT:     ADD_INT T4.W, T0.W, literal.z,
+; EG-NEXT:     LSHR * T5.W, T3.X, PV.Z,
+; EG-NEXT:    31(4.344025e-44), 150(2.101948e-43)
+; EG-NEXT:    -182(nan), 0(0.000000e+00)
+; EG-NEXT:     ADD_INT T5.X, T0.W, literal.x,
+; EG-NEXT:     LSHR T6.Y, PS, 1,
+; EG-NEXT:     LSHL T2.Z, T3.X, PV.W,
+; EG-NEXT:     SETGT_UINT T0.W, PV.Z, literal.y,
+; EG-NEXT:     LSHR * T4.W, T3.X, PV.Z,
+; EG-NEXT:    -127(nan), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T3.X, PV.W, PS, 0.0,
+; EG-NEXT:     CNDE_INT T6.Y, T3.W, PV.Y, PV.Z,
+; EG-NEXT:     SETGT_INT T0.Z, PV.X, literal.x,
+; EG-NEXT:     XOR_INT T0.W, T2.X, T1.Y,
+; EG-NEXT:     XOR_INT * T3.W, T4.Y, T1.Y,
+; EG-NEXT:    23(3.222986e-44), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, PS, T1.Y,
+; EG-NEXT:     SUBB_UINT T4.Y, PV.W, T1.Y,
+; EG-NEXT:     CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT:     CNDE_INT T3.W, PV.Z, PV.X, T4.X,
+; EG-NEXT:     ASHR * T4.W, KC0[3].W, literal.x,
+; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT:     XOR_INT T3.X, PV.W, PS,
+; EG-NEXT:     XOR_INT T6.Y, PV.Z, PS,
+; EG-NEXT:     SUB_INT T0.Z, PV.X, PV.Y,
+; EG-NEXT:     SETGT_INT T3.W, T5.Y, literal.x,
+; EG-NEXT:     CNDE_INT * T5.W, T1.X, 0.0, T1.Z, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SETGT_INT T0.X, T0.X, literal.x,
+; EG-NEXT:     CNDE_INT T5.Y, PV.W, 0.0, PV.Z,
+; EG-NEXT:     SUB_INT T0.Z, T1.W, T2.Y,
+; EG-NEXT:     SUB_INT T1.W, PV.Y, T4.W,
+; EG-NEXT:     SUBB_UINT * T6.W, PV.X, T4.W,
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     SUB_INT T2.X, PV.W, PS,
+; EG-NEXT:     SETGT_INT T2.Y, T5.X, literal.x,
+; EG-NEXT:     CNDE_INT T5.Z, T1.X, 0.0, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT:     SUB_INT T0.W, T0.W, T1.Y,
+; EG-NEXT:     CNDE_INT * T1.W, PV.X, 0.0, T3.Y, BS:VEC_021/SCL_122
+; EG-NEXT:    -1(nan), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T5.X, T3.W, 0.0, PV.W,
+; EG-NEXT:     CNDE_INT T1.Y, PV.Y, 0.0, PV.X,
+; EG-NEXT:     SUB_INT T0.W, T0.Y, T2.W,
+; EG-NEXT:     LSHR * T2.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT T1.Z, T0.X, 0.0, PV.W,
+; EG-NEXT:     SUB_INT * T0.W, T3.X, T4.W, BS:VEC_120/SCL_212
+; EG-NEXT:     CNDE_INT T1.X, T2.Y, 0.0, PV.W,
+; EG-NEXT:     ADD_INT * T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:    16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT:     LSHR * T0.X, PV.W, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptoui <4 x float> %x to <4 x i64>
   store <4 x i64> %conv, <4 x i64> addrspace(1)* %out
   ret void
 }
 
-
-; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i1:
-; GCN: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
-
-; EG: AND_INT
-; EG: SETE_DX10 {{[*]?}} T{{[0-9]+}}.{{[XYZW]}}, KC0[2].Z, 1.0,
 define amdgpu_kernel void @fp_to_uint_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_uint_f32_to_i1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cmp_eq_f32_e64 s[4:5], 1.0, s4
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_f32_to_i1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_f32_e64 s[0:1], 1.0, s0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_f32_to_i1:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:     SETE_DX10 * T1.W, KC0[2].Z, 1.0,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     AND_INT T1.W, PS, 1,
+; EG-NEXT:     LSHL * T0.W, PV.W, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T0.X, PV.W, PS,
+; EG-NEXT:     LSHL * T0.W, literal.x, PS,
+; EG-NEXT:    255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %conv = fptoui float %in to i1
   store i1 %conv, i1 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_fabs_f32_to_i1:
-; GCN: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, |s{{[0-9]+}}|
 define amdgpu_kernel void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_uint_fabs_f32_to_i1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cmp_eq_f32_e64 s[4:5], 1.0, |s4|
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_fabs_f32_to_i1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_f32_e64 s[0:1], 1.0, |s0|
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_fabs_f32_to_i1:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT:     SETE_DX10 * T1.W, |KC0[2].Z|, 1.0,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     AND_INT T1.W, PS, 1,
+; EG-NEXT:     LSHL * T0.W, PV.W, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T0.X, PV.W, PS,
+; EG-NEXT:     LSHL * T0.W, literal.x, PS,
+; EG-NEXT:    255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %in.fabs = call float @llvm.fabs.f32(float %in)
   %conv = fptoui float %in.fabs to i1
   store i1 %conv, i1 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i16:
-; GCN: v_cvt_u32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
-; GCN: buffer_store_short [[VAL]]
 define amdgpu_kernel void @fp_to_uint_f32_to_i16(i16 addrspace(1)* %out, float %in) #0 {
+; SI-LABEL: fp_to_uint_f32_to_i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s4, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: fp_to_uint_f32_to_i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_u32_f32_e32 v0, s2
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
+;
+; EG-LABEL: fp_to_uint_f32_to_i16:
+; EG:       ; %bb.0:
+; EG-NEXT:    ALU 12, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT:    CF_END
+; EG-NEXT:    PAD
+; EG-NEXT:    ALU clause starting at 4:
+; EG-NEXT:     TRUNC T0.W, KC0[2].Z,
+; EG-NEXT:     AND_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T1.W, PS, literal.x,
+; EG-NEXT:     FLT_TO_UINT * T0.X, PV.W,
+; EG-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT:     LSHL T0.X, PS, PV.W,
+; EG-NEXT:     LSHL * T0.W, literal.x, PV.W,
+; EG-NEXT:    65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT:     MOV T0.Y, 0.0,
+; EG-NEXT:     MOV * T0.Z, 0.0,
+; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %uint = fptoui float %in to i16
   store i16 %uint, i16 addrspace(1)* %out
   ret void


        


More information about the llvm-commits mailing list