[llvm] 5247ae9 - AMDGPU: Switch some tests to generated checks

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Sat Nov 12 10:37:57 PST 2022


Author: Matt Arsenault
Date: 2022-11-12T10:29:47-08:00
New Revision: 5247ae9de5122d2e77ffe03099a090749324d781

URL: https://github.com/llvm/llvm-project/commit/5247ae9de5122d2e77ffe03099a090749324d781
DIFF: https://github.com/llvm/llvm-project/commit/5247ae9de5122d2e77ffe03099a090749324d781.diff

LOG: AMDGPU: Switch some tests to generated checks
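
The new assertions are autogenerated (per the NOTE lines below) rather than written by hand. A minimal sketch of the typical regeneration workflow, assuming a locally built llc is on PATH or is passed explicitly via --llc-binary:

    # Assumes llc from the local build is reachable (or add --llc-binary <path-to-llc>).
    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll \
        llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll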

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
index af290aa914adb..7b820ab1e7174 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -1,11 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
-; GCN-LABEL: {{^}}sint_to_fp_i32_to_f64
-; GCN: v_cvt_f64_i32_e32
 define amdgpu_kernel void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+; CI-LABEL: sint_to_fp_i32_to_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s2
+; CI-NEXT:    v_mov_b32_e32 v3, s1
+; CI-NEXT:    v_mov_b32_e32 v2, s0
+; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: sint_to_fp_i32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s2
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %result = sitofp i32 %in to double
   store double %result, double addrspace(1)* %out
   ret void
@@ -13,47 +33,142 @@ define amdgpu_kernel void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32
 
 ; We can't fold the SGPRs into v_cndmask_b32_e64, because it already
 ; uses an SGPR (implicit vcc).
-
-; GCN-LABEL: {{^}}sint_to_fp_i1_f64:
-; GCN-DAG: s_cmp_eq_u32
-; GCN-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0xbff00000, 0
-; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[ZERO]]:[[SEL]]]
-; GCN: s_endpgm
 define amdgpu_kernel void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
+; CI-LABEL: sint_to_fp_i1_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CI-NEXT:    v_mov_b32_e32 v0, 0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_cmp_eq_u32 s2, 0
+; CI-NEXT:    s_cselect_b32 s2, 0xbff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v3, s1
+; CI-NEXT:    v_mov_b32_e32 v1, s2
+; CI-NEXT:    v_mov_b32_e32 v2, s0
+; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: sint_to_fp_i1_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_cmp_eq_u32 s2, 0
+; VI-NEXT:    s_cselect_b32 s2, 0xbff00000, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %fp = sitofp i1 %cmp to double
   store double %fp, double addrspace(1)* %out, align 4
   ret void
 }
 
-; GCN-LABEL: {{^}}sint_to_fp_i1_f64_load:
-; GCN: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, -1
-; GCN: v_cvt_f64_i32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: s_endpgm
 define amdgpu_kernel void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
+; CI-LABEL: sint_to_fp_i1_f64_load:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_bitcmp1_b32 s2, 0
+; CI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[2:3]
+; CI-NEXT:    v_cvt_f64_i32_e32 v[0:1], v0
+; CI-NEXT:    v_mov_b32_e32 v3, s1
+; CI-NEXT:    v_mov_b32_e32 v2, s0
+; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: sint_to_fp_i1_f64_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitcmp1_b32 s2, 0
+; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[2:3]
+; VI-NEXT:    v_cvt_f64_i32_e32 v[0:1], v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %fp = sitofp i1 %in to double
   store double %fp, double addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: @s_sint_to_fp_i64_to_f64
 define amdgpu_kernel void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
+; CI-LABEL: s_sint_to_fp_i64_to_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; CI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s2
+; CI-NEXT:    v_mov_b32_e32 v4, s0
+; CI-NEXT:    v_mov_b32_e32 v5, s1
+; CI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; CI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
+; CI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_sint_to_fp_i64_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s3
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s2
+; VI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; VI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %result = sitofp i64 %in to double
   store double %result, double addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: @v_sint_to_fp_i64_to_f64
-; GCN: flat_load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
-; GCN-DAG: v_cvt_f64_i32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
-; GCN-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; GCN-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
-; GCN: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
+; CI-LABEL: v_sint_to_fp_i64_to_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v1, s3
+; CI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; CI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_cvt_f64_i32_e32 v[1:2], v1
+; CI-NEXT:    v_cvt_f64_u32_e32 v[3:4], v0
+; CI-NEXT:    v_ldexp_f64 v[0:1], v[1:2], 32
+; CI-NEXT:    v_mov_b32_e32 v2, s0
+; CI-NEXT:    v_add_f64 v[0:1], v[0:1], v[3:4]
+; CI-NEXT:    v_mov_b32_e32 v3, s1
+; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_sint_to_fp_i64_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cvt_f64_i32_e32 v[1:2], v1
+; VI-NEXT:    v_cvt_f64_u32_e32 v[3:4], v0
+; VI-NEXT:    v_ldexp_f64 v[1:2], v[1:2], 32
+; VI-NEXT:    v_add_f64 v[0:1], v[1:2], v[3:4]
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %val = load i64, i64 addrspace(1)* %gep, align 8
@@ -63,25 +178,53 @@ define amdgpu_kernel void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i6
 }
 
 ; FIXME: bfe and sext on VI+
-; GCN-LABEL: {{^}}s_sint_to_fp_i8_to_f64:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; CI-NOT: bfe
-; CI: s_sext_i32_i8 [[SEXT:s[0-9]+]], [[VAL]]
-
-; VI: s_bfe_i32 [[BFE:s[0-9]+]], [[VAL]], 0x80000
-; VI: s_sext_i32_i16 [[SEXT:s[0-9]+]], [[BFE]]
-
-; GCN: v_cvt_f64_i32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[SEXT]]
 define amdgpu_kernel void @s_sint_to_fp_i8_to_f64(double addrspace(1)* %out, i8 %in) {
+; CI-LABEL: s_sint_to_fp_i8_to_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_sext_i32_i8 s2, s2
+; CI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s2
+; CI-NEXT:    v_mov_b32_e32 v3, s1
+; CI-NEXT:    v_mov_b32_e32 v2, s0
+; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_sint_to_fp_i8_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bfe_i32 s2, s2, 0x80000
+; VI-NEXT:    s_sext_i32_i16 s2, s2
+; VI-NEXT:    v_cvt_f64_i32_e32 v[0:1], s2
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %fp = sitofp i8 %in to double
   store double %fp, double addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sint_to_fp_i8_to_f64:
-; GCN: v_bfe_i32 [[SEXT:v[0-9]+]]
-; GCN: v_cvt_f64_i32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[SEXT]]
 define double @v_sint_to_fp_i8_to_f64(i8 %in) {
+; CI-LABEL: v_sint_to_fp_i8_to_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; CI-NEXT:    v_cvt_f64_i32_e32 v[0:1], v0
+; CI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_sint_to_fp_i8_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; VI-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; VI-NEXT:    v_cvt_f64_i32_e32 v[0:1], v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %fp = sitofp i8 %in to double
   ret double %fp
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN: {{.*}}

diff  --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
index a7b85f62b0504..d4286b8ab95a9 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -1,16 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope  -check-prefixes=GCN,SI %s
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
-; GCN-LABEL: {{^}}v_uint_to_fp_i64_to_f64
-; GCN: flat_load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
-; GCN-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
-; GCN-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; GCN-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
-; GCN: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
+; SI-LABEL: v_uint_to_fp_i64_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[1:2], v1
+; SI-NEXT:    v_cvt_f64_u32_e32 v[3:4], v0
+; SI-NEXT:    v_ldexp_f64 v[0:1], v[1:2], 32
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    v_add_f64 v[0:1], v[0:1], v[3:4]
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_uint_to_fp_i64_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[1:2], v1
+; VI-NEXT:    v_cvt_f64_u32_e32 v[3:4], v0
+; VI-NEXT:    v_ldexp_f64 v[1:2], v[1:2], 32
+; VI-NEXT:    v_add_f64 v[0:1], v[1:2], v[3:4]
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %val = load i64, i64 addrspace(1)* %gep, align 8
@@ -19,53 +50,230 @@ define amdgpu_kernel void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i6
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_i64_to_f64
 define amdgpu_kernel void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
+; SI-LABEL: s_uint_to_fp_i64_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s3
+; SI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s2
+; SI-NEXT:    v_mov_b32_e32 v4, s0
+; SI-NEXT:    v_mov_b32_e32 v5, s1
+; SI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; SI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
+; SI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_i64_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s3
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s2
+; VI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; VI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %cast = uitofp i64 %in to double
   store double %cast, double addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
 define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
+; SI-LABEL: s_uint_to_fp_v2i64_to_v2f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x4
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s3
+; SI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s1
+; SI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s2
+; SI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s0
+; SI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; SI-NEXT:    v_ldexp_f64 v[8:9], v[2:3], 32
+; SI-NEXT:    v_add_f64 v[2:3], v[0:1], v[4:5]
+; SI-NEXT:    v_add_f64 v[0:1], v[8:9], v[6:7]
+; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_mov_b32_e32 v5, s5
+; SI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_v2i64_to_v2f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x10
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s3
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s1
+; VI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s0
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; VI-NEXT:    v_ldexp_f64 v[4:5], v[2:3], 32
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s2
+; VI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
+; VI-NEXT:    v_add_f64 v[0:1], v[4:5], v[6:7]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
   %cast = uitofp <2 x i64> %in to <2 x double>
   store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
 define amdgpu_kernel void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
+; SI-LABEL: s_uint_to_fp_v4i64_to_v4f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x8
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s11
+; SI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s9
+; SI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s10
+; SI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s8
+; SI-NEXT:    v_cvt_f64_u32_e32 v[8:9], s15
+; SI-NEXT:    v_cvt_f64_u32_e32 v[10:11], s13
+; SI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; SI-NEXT:    v_ldexp_f64 v[4:5], v[4:5], 32
+; SI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
+; SI-NEXT:    v_add_f64 v[0:1], v[4:5], v[6:7]
+; SI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s14
+; SI-NEXT:    v_cvt_f64_u32_e32 v[12:13], s12
+; SI-NEXT:    v_ldexp_f64 v[6:7], v[8:9], 32
+; SI-NEXT:    v_ldexp_f64 v[8:9], v[10:11], 32
+; SI-NEXT:    s_add_u32 s2, s0, 16
+; SI-NEXT:    s_addc_u32 s3, s1, 0
+; SI-NEXT:    v_add_f64 v[6:7], v[6:7], v[4:5]
+; SI-NEXT:    v_add_f64 v[4:5], v[8:9], v[12:13]
+; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    v_mov_b32_e32 v8, s2
+; SI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; SI-NEXT:    s_nop 0
+; SI-NEXT:    v_mov_b32_e32 v5, s1
+; SI-NEXT:    v_mov_b32_e32 v4, s0
+; SI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_v4i64_to_v4f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x20
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s15
+; VI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s13
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s11
+; VI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s9
+; VI-NEXT:    v_ldexp_f64 v[8:9], v[2:3], 32
+; VI-NEXT:    v_ldexp_f64 v[4:5], v[4:5], 32
+; VI-NEXT:    v_ldexp_f64 v[0:1], v[0:1], 32
+; VI-NEXT:    v_ldexp_f64 v[10:11], v[6:7], 32
+; VI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s14
+; VI-NEXT:    v_cvt_f64_u32_e32 v[12:13], s12
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s10
+; VI-NEXT:    v_cvt_f64_u32_e32 v[14:15], s8
+; VI-NEXT:    v_add_f64 v[6:7], v[8:9], v[6:7]
+; VI-NEXT:    v_add_f64 v[4:5], v[4:5], v[12:13]
+; VI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
+; VI-NEXT:    v_add_f64 v[0:1], v[10:11], v[14:15]
+; VI-NEXT:    s_add_u32 s2, s0, 16
+; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    v_mov_b32_e32 v11, s3
+; VI-NEXT:    v_mov_b32_e32 v9, s1
+; VI-NEXT:    v_mov_b32_e32 v10, s2
+; VI-NEXT:    v_mov_b32_e32 v8, s0
+; VI-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; VI-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
+; VI-NEXT:    s_endpgm
   %cast = uitofp <4 x i64> %in to <4 x double>
   store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_i32_to_f64
-; GCN: v_cvt_f64_u32_e32
-; GCN: s_endpgm
 define amdgpu_kernel void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+; SI-LABEL: s_uint_to_fp_i32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s2
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_i32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s2
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %cast = uitofp i32 %in to double
   store double %cast, double addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_v2i32_to_v2f64
-; GCN: v_cvt_f64_u32_e32
-; GCN: v_cvt_f64_u32_e32
-; GCN: s_endpgm
 define amdgpu_kernel void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
+; GCN-LABEL: s_uint_to_fp_v2i32_to_v2f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_cvt_f64_u32_e32 v[2:3], s3
+; GCN-NEXT:    v_cvt_f64_u32_e32 v[0:1], s2
+; GCN-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NEXT:    s_endpgm
   %cast = uitofp <2 x i32> %in to <2 x double>
   store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_v4i32_to_v4f64
-; GCN: v_cvt_f64_u32_e32
-; GCN: v_cvt_f64_u32_e32
-; GCN: v_cvt_f64_u32_e32
-; GCN: v_cvt_f64_u32_e32
-; GCN: s_endpgm
 define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
+; SI-LABEL: s_uint_to_fp_v4i32_to_v4f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x4
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s0
+; SI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s3
+; SI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s2
+; SI-NEXT:    s_add_u32 s0, s4, 16
+; SI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s1
+; SI-NEXT:    s_addc_u32 s1, s5, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s1
+; SI-NEXT:    v_mov_b32_e32 v8, s0
+; SI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; SI-NEXT:    s_nop 0
+; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_mov_b32_e32 v5, s5
+; SI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_v4i32_to_v4f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x10
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s0
+; VI-NEXT:    v_cvt_f64_u32_e32 v[6:7], s3
+; VI-NEXT:    v_cvt_f64_u32_e32 v[4:5], s2
+; VI-NEXT:    s_add_u32 s0, s4, 16
+; VI-NEXT:    v_cvt_f64_u32_e32 v[2:3], s1
+; VI-NEXT:    s_addc_u32 s1, s5, 0
+; VI-NEXT:    v_mov_b32_e32 v9, s1
+; VI-NEXT:    v_mov_b32_e32 v8, s0
+; VI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; VI-NEXT:    s_nop 0
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v5, s5
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
   %cast = uitofp <4 x i32> %in to <4 x double>
   store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
   ret void
@@ -73,54 +281,118 @@ define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)
 
 ; We can't fold the SGPRs into v_cndmask_b32_e32, because it already
 ; uses an SGPR (implicit vcc).
-
-; GCN-LABEL: {{^}}uint_to_fp_i1_to_f64:
-; VI-DAG: s_cmp_eq_u32
-; GCN-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0x3ff00000, 0
-; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[ZERO]]:[[SEL]]]
-; GCN: s_endpgm
 define amdgpu_kernel void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
+; SI-LABEL: uint_to_fp_i1_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_cmp_eq_u32 s2, 0
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: uint_to_fp_i1_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_cmp_eq_u32 s2, 0
+; VI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %fp = uitofp i1 %cmp to double
   store double %fp, double addrspace(1)* %out, align 4
   ret void
 }
 
-; GCN-LABEL: {{^}}uint_to_fp_i1_to_f64_load:
-; GCN: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
-; GCN: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: s_endpgm
 define amdgpu_kernel void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
+; SI-LABEL: uint_to_fp_i1_to_f64_load:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bitcmp1_b32 s2, 0
+; SI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: uint_to_fp_i1_to_f64_load:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_bitcmp1_b32 s2, 0
+; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %fp = uitofp i1 %in to double
   store double %fp, double addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}s_uint_to_fp_i8_to_f64:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; SI: s_and_b32 [[ZEXT:s[0-9]+]], [[VAL]], 0xff{{$}}
-; SI: v_cvt_f64_u32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[ZEXT]]
-
-; VI: s_and_b32 [[ZEXT:s[0-9]+]], [[VAL]], 0xff{{$}}
-; VI: v_cvt_f64_u32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[ZEXT]]
 define amdgpu_kernel void @s_uint_to_fp_i8_to_f64(double addrspace(1)* %out, i8 %in) {
+; SI-LABEL: s_uint_to_fp_i8_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[4:5], 0x2
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s2, s2, 0xff
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s2
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_uint_to_fp_i8_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s2, s2, 0xff
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], s2
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %fp = uitofp i8 %in to double
   store double %fp, double addrspace(1)* %out
   ret void
 }
 
 ; FIXME: Worse on VI
-; GCN-LABEL: {{^}}v_uint_to_fp_i8_to_f64:
-; SI: v_and_b32_e32 [[ZEXT:v[0-9]+]], 0xff, v0
-; SI: v_cvt_f64_u32_e32 v{{\[[0-9]+:[0-9]+\]}}, [[ZEXT]]
-
-; VI: v_mov_b32_e32 v{{[0-9]+}}
-; VI: v_and_b32_sdwa
-; VI: v_cvt_f64_u32_e32 v{{\[[0-9]+:[0-9]+\]}},
 define double @v_uint_to_fp_i8_to_f64(i8 %in) {
+; SI-LABEL: v_uint_to_fp_i8_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_cvt_f64_u32_e32 v[0:1], v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_uint_to_fp_i8_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, 0xffff
+; VI-NEXT:    v_and_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT:    v_cvt_f64_u32_e32 v[0:1], v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %fp = uitofp i8 %in to double
   ret double %fp
 }


        

