[llvm] 1f8f2ed - [NFC][AMDGPU] Autogenerate tests for uniform i32 promo in ISel (#106382)

Pierre van Houtryve via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 29 06:20:37 PDT 2024


Author: Pierre van Houtryve
Date: 2024-08-29T15:20:32+02:00
New Revision: 1f8f2ed66ab742a1fbb4a84411e656cb8324e107

URL: https://github.com/llvm/llvm-project/commit/1f8f2ed66ab742a1fbb4a84411e656cb8324e107
DIFF: https://github.com/llvm/llvm-project/commit/1f8f2ed66ab742a1fbb4a84411e656cb8324e107.diff

LOG: [NFC][AMDGPU] Autogenerate tests for uniform i32 promo in ISel (#106382)

Many tests were easy to update by hand, but these are quite big, and I think it's
better to autogenerate them so the differences are easy to see.

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
    llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index ee1df9aa0d6cea..81d6ce219b5d51 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -1,415 +1,729 @@
-; RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -check-prefixes=GCN %s
 
-; GCN-LABEL: {{^}}float4_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1.0, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2.0, [[V1]], [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4.0, [[V2]], [[C3]]
-; GCN:     store_dword v[{{[0-9:]+}}], [[V3]]
 define amdgpu_kernel void @float4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: float4_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1.0, s[2:3]
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, 4.0, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <4 x float> <float 0.0, float 1.0, float 2.0, float 4.0>, i32 %sel
   store float %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}int4_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 2
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2, [[V1]], vcc
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4, [[V2]], vcc
-; GCN: store_dword v[{{[0-9:]+}}], [[V3]]
 define amdgpu_kernel void @int4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: int4_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 2, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, 4, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 4>, i32 %sel
   store i32 %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double4_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x3f847ae1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x47ae147b
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 2
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0xe147ae14, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x4000147a, s{{[0-9]+}}
-; GCN-DAG: s_cmp_eq_u32 s{{[[0-9]+}}, 3
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x40100a3d, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x70a3d70a, s{{[0-9]+}}
-; GCN: store_dwordx2 v[{{[0-9:]+}}]
 define amdgpu_kernel void @double4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double4_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s2, 0x3ff028f5
+; GCN-NEXT:    s_mov_b32 s3, 0xc28f5c29
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b32 s2, s2, 0x3f847ae1
+; GCN-NEXT:    s_cselect_b32 s3, s3, 0x47ae147b
+; GCN-NEXT:    s_cmp_eq_u32 s4, 2
+; GCN-NEXT:    s_cselect_b32 s3, 0xe147ae14, s3
+; GCN-NEXT:    s_cselect_b32 s2, 0x4000147a, s2
+; GCN-NEXT:    s_cmp_eq_u32 s4, 3
+; GCN-NEXT:    s_cselect_b32 s2, 0x40100a3d, s2
+; GCN-NEXT:    s_cselect_b32 s3, 0x70a3d70a, s3
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <4 x double> <double 0.01, double 1.01, double 2.01, double 4.01>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double5_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x3f847ae1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x47ae147b
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 2
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0xe147ae14, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x4000147a, s{{[0-9]+}}
-; GCN-DAG: s_cmp_eq_u32 s{{[[0-9]+}}, 3
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x40100a3d, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x70a3d70a, s{{[0-9]+}}
-; GCN-DAG: s_cmp_eq_u32 s{{[[0-9]+}}, 4
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, 0x40140a3d, s{{[0-9]+}}
-; GCN: store_dwordx2 v[{{[0-9:]+}}]
 define amdgpu_kernel void @double5_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double5_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s6, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s2, 0x3ff028f5
+; GCN-NEXT:    s_mov_b32 s3, 0xc28f5c29
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s6, 1
+; GCN-NEXT:    s_cselect_b32 s2, s2, 0x3f847ae1
+; GCN-NEXT:    s_cselect_b32 s3, s3, 0x47ae147b
+; GCN-NEXT:    s_cmp_eq_u32 s6, 2
+; GCN-NEXT:    s_cselect_b32 s8, 0xe147ae14, s3
+; GCN-NEXT:    s_cselect_b32 s7, 0x4000147a, s2
+; GCN-NEXT:    s_cmp_eq_u32 s6, 3
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_and_b64 s[4:5], s[2:3], exec
+; GCN-NEXT:    s_cselect_b32 s9, 0x40100a3d, s7
+; GCN-NEXT:    s_cmp_eq_u32 s6, 4
+; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT:    s_and_b64 s[6:7], s[4:5], exec
+; GCN-NEXT:    s_cselect_b32 s6, 0x40140a3d, s9
+; GCN-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; GCN-NEXT:    s_cselect_b32 s2, 0x70a3d70a, s8
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <5 x double> <double 0.01, double 1.01, double 2.01, double 4.01, double 5.01>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}half4_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_mov_b32 s[[SL:[0-9]+]], 0x40003c00
-; GCN-DAG: s_mov_b32 s[[SH:[0-9]+]], 0x44004200
-; GCN-DAG: s_lshl_b32 [[SEL:s[0-p]+]], s{{[0-9]+}}, 4
-; GCN:     s_lshr_b64 s[[[RL:[0-9]+]]:{{[0-9]+}}], s[[[SL]]:[[SH]]], [[SEL]]
-; GCN-DAG: v_mov_b32_e32 v[[VRL:[0-9]+]], s[[RL]]
-; GCN:     store_short v[{{[0-9:]+}}], v[[VRL]]
 define amdgpu_kernel void @half4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: half4_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s2, 0x40003c00
+; GCN-NEXT:    s_mov_b32 s3, 0x44004200
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s4, s4, 4
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    flat_store_short v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <4 x half> <half 1.0, half 2.0, half 3.0, half 4.0>, i32 %sel
   store half %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}float2_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1.0, [[C1]]
-; GCN: store_dword v[{{[0-9:]+}}], [[V1]]
 define amdgpu_kernel void @float2_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: float2_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <2 x float> <float 0.0, float 1.0>, i32 %sel
   store float %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double2_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x3f847ae1
-; GCN-DAG: s_cselect_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x47ae147b
-; GCN: store_dwordx2 v[{{[0-9:]+}}]
 define amdgpu_kernel void @double2_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double2_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s2, 0x3ff028f5
+; GCN-NEXT:    s_mov_b32 s3, 0xc28f5c29
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b32 s2, s2, 0x3f847ae1
+; GCN-NEXT:    s_cselect_b32 s3, s3, 0x47ae147b
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <2 x double> <double 0.01, double 1.01>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}half8_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V4:v[0-9]+]], {{[^,]+}}, [[V3]], [[C4]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V5:v[0-9]+]], {{[^,]+}}, [[V4]], [[C5]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V6:v[0-9]+]], {{[^,]+}}, [[V5]], [[C6]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V7:v[0-9]+]], {{[^,]+}}, [[V6]], [[C7]]
-; GCN:     store_short v[{{[0-9:]+}}], [[V7]]
 define amdgpu_kernel void @half8_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: half8_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x3c00
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4000
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4200
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4400
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4500
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 5
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4600
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4700
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 7
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4800
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_short v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <8 x half> <half 1.0, half 2.0, half 3.0, half 4.0, half 5.0, half 6.0, half 7.0, half 8.0>, i32 %sel
   store half %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}short8_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V4:v[0-9]+]], {{[^,]+}}, [[V3]], [[C4]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V5:v[0-9]+]], {{[^,]+}}, [[V4]], [[C5]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V6:v[0-9]+]], {{[^,]+}}, [[V5]], [[C6]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V7:v[0-9]+]], {{[^,]+}}, [[V6]], [[C7]]
-; GCN:     store_short v[{{[0-9:]+}}], [[V7]]
 define amdgpu_kernel void @short8_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: short8_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 1, 2, s[2:3]
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 3, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 4, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 5
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 6, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 7
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 7, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, 8, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_short v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i32 %sel
   store i16 %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}float8_extelt:
-; GCN-DAG: s_load_dwordx2 s[0:1], s[2:3], 0x24
-; GCN-DAG: s_load_dword [[S0:s[0-9]+]], s[2:3], 0x2c
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
-; GCN-DAG: s_waitcnt lgkmcnt(0)
-; GCN-DAG: s_mov_b32 m0, [[S0]]
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40a00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40c00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40e00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41000000
-; GCN-DAG: v_movrels_b32_e32 [[RES:v[0-9]+]], v{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN:     flat_store_dword v[{{[0-9:]+}}], [[RES]]
 define amdgpu_kernel void @float8_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: float8_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_load_dword s2, s[2:3], 0x2c
+; GCN-NEXT:    v_mov_b32_e32 v0, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v1, 2.0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT:    v_mov_b32_e32 v3, 4.0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 m0, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x40c00000
+; GCN-NEXT:    v_mov_b32_e32 v6, 0x40e00000
+; GCN-NEXT:    v_mov_b32_e32 v7, 0x41000000
+; GCN-NEXT:    v_movrels_b32_e32 v2, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, i32 %sel
   store float %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double8_extelt:
-; GCN-NOT: buffer_
-; GCN-NOT: s_or_b32
-; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[#BASE:]], [[ZERO]]
-; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], v[[#BASE]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_HI:[0-9]+]], v[[#BASE+1]]
-; GCN:     store_dwordx2 v[{{[0-9:]+}}], v[[[RES_LO]]:[[RES_HI]]]
 define amdgpu_kernel void @double8_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double8_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[16:17], s[2:3], 0x24
+; GCN-NEXT:    s_load_dword s18, s[2:3], 0x2c
+; GCN-NEXT:    s_mov_b32 s0, 0
+; GCN-NEXT:    s_mov_b32 s15, 0x40200000
+; GCN-NEXT:    s_mov_b32 s13, 0x401c0000
+; GCN-NEXT:    s_mov_b32 s11, 0x40180000
+; GCN-NEXT:    s_mov_b32 s9, 0x40140000
+; GCN-NEXT:    s_mov_b32 s7, 0x40100000
+; GCN-NEXT:    s_mov_b32 s5, 0x40080000
+; GCN-NEXT:    s_mov_b32 s3, 2.0
+; GCN-NEXT:    s_mov_b32 s1, 0x3ff00000
+; GCN-NEXT:    s_mov_b32 s2, s0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s6, s0
+; GCN-NEXT:    s_mov_b32 s8, s0
+; GCN-NEXT:    s_mov_b32 s10, s0
+; GCN-NEXT:    s_mov_b32 s12, s0
+; GCN-NEXT:    s_mov_b32 s14, s0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s18, s18, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v15, s15
+; GCN-NEXT:    s_mov_b32 m0, s18
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-NEXT:    v_mov_b32_e32 v6, s6
+; GCN-NEXT:    v_mov_b32_e32 v7, s7
+; GCN-NEXT:    v_mov_b32_e32 v8, s8
+; GCN-NEXT:    v_mov_b32_e32 v9, s9
+; GCN-NEXT:    v_mov_b32_e32 v10, s10
+; GCN-NEXT:    v_mov_b32_e32 v11, s11
+; GCN-NEXT:    v_mov_b32_e32 v12, s12
+; GCN-NEXT:    v_mov_b32_e32 v13, s13
+; GCN-NEXT:    v_mov_b32_e32 v14, s14
+; GCN-NEXT:    v_movrels_b32_e32 v16, v1
+; GCN-NEXT:    v_movrels_b32_e32 v15, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NEXT:    flat_store_dwordx2 v[0:1], v[15:16]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <8 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double7_extelt:
-; GCN-NOT: buffer_
-; GCN-NOT: s_or_b32
-; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[#BASE:]], [[ZERO]]
-; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], v[[#BASE]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_HI:[0-9]+]], v[[#BASE+1]]
-; GCN:     store_dwordx2 v[{{[0-9:]+}}], v[[[RES_LO]]:[[RES_HI]]]
 define amdgpu_kernel void @double7_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double7_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[14:15], s[2:3], 0x24
+; GCN-NEXT:    s_load_dword s16, s[2:3], 0x2c
+; GCN-NEXT:    s_mov_b32 s0, 0
+; GCN-NEXT:    s_mov_b32 s13, 0x401c0000
+; GCN-NEXT:    s_mov_b32 s11, 0x40180000
+; GCN-NEXT:    s_mov_b32 s9, 0x40140000
+; GCN-NEXT:    s_mov_b32 s7, 0x40100000
+; GCN-NEXT:    s_mov_b32 s5, 0x40080000
+; GCN-NEXT:    s_mov_b32 s3, 2.0
+; GCN-NEXT:    s_mov_b32 s1, 0x3ff00000
+; GCN-NEXT:    s_mov_b32 s2, s0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s6, s0
+; GCN-NEXT:    s_mov_b32 s8, s0
+; GCN-NEXT:    s_mov_b32 s10, s0
+; GCN-NEXT:    s_mov_b32 s12, s0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s16, s16, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v15, s15
+; GCN-NEXT:    s_mov_b32 m0, s16
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-NEXT:    v_mov_b32_e32 v6, s6
+; GCN-NEXT:    v_mov_b32_e32 v7, s7
+; GCN-NEXT:    v_mov_b32_e32 v8, s8
+; GCN-NEXT:    v_mov_b32_e32 v9, s9
+; GCN-NEXT:    v_mov_b32_e32 v10, s10
+; GCN-NEXT:    v_mov_b32_e32 v11, s11
+; GCN-NEXT:    v_mov_b32_e32 v12, s12
+; GCN-NEXT:    v_mov_b32_e32 v13, s13
+; GCN-NEXT:    v_mov_b32_e32 v14, s14
+; GCN-NEXT:    v_movrels_b32_e32 v16, v1
+; GCN-NEXT:    v_movrels_b32_e32 v15, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-NEXT:    flat_store_dwordx2 v[0:1], v[15:16]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <7 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}float16_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_mov_b32 m0,
-; GCN-DAG: v_mov_b32_e32 [[VLO:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40a00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40c00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40e00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41000000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41100000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41200000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41300000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41400000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41500000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41600000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41700000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41800000
-; GCN-DAG: v_movrels_b32_e32 [[RES:v[0-9]+]], [[VLO]]
-; GCN:     store_dword v[{{[0-9:]+}}], [[RES]]
 define amdgpu_kernel void @float16_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: float16_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_load_dword s2, s[2:3], 0x2c
+; GCN-NEXT:    v_mov_b32_e32 v0, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v1, 2.0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT:    v_mov_b32_e32 v3, 4.0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 m0, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x40c00000
+; GCN-NEXT:    v_mov_b32_e32 v6, 0x40e00000
+; GCN-NEXT:    v_mov_b32_e32 v7, 0x41000000
+; GCN-NEXT:    v_mov_b32_e32 v8, 0x41100000
+; GCN-NEXT:    v_mov_b32_e32 v9, 0x41200000
+; GCN-NEXT:    v_mov_b32_e32 v10, 0x41300000
+; GCN-NEXT:    v_mov_b32_e32 v11, 0x41400000
+; GCN-NEXT:    v_mov_b32_e32 v12, 0x41500000
+; GCN-NEXT:    v_mov_b32_e32 v13, 0x41600000
+; GCN-NEXT:    v_mov_b32_e32 v14, 0x41700000
+; GCN-NEXT:    v_mov_b32_e32 v15, 0x41800000
+; GCN-NEXT:    v_movrels_b32_e32 v2, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, i32 %sel
   store float %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double15_extelt:
-; GCN-NOT: buffer_
-; GCN-NOT: s_or_b32
-; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[#BASE:]], [[ZERO]]
-; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], v[[#BASE]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_HI:[0-9]+]], v[[#BASE+1]]
-; GCN:     store_dwordx2 v[{{[0-9:]+}}], v[[[RES_LO]]:[[RES_HI]]]
 define amdgpu_kernel void @double15_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double15_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s36, 0
+; GCN-NEXT:    s_mov_b32 s65, 0x402e0000
+; GCN-NEXT:    s_mov_b32 s63, 0x402c0000
+; GCN-NEXT:    s_mov_b32 s61, 0x402a0000
+; GCN-NEXT:    s_mov_b32 s59, 0x40280000
+; GCN-NEXT:    s_mov_b32 s57, 0x40260000
+; GCN-NEXT:    s_mov_b32 s55, 0x40240000
+; GCN-NEXT:    s_mov_b32 s53, 0x40220000
+; GCN-NEXT:    s_mov_b32 s51, 0x40200000
+; GCN-NEXT:    s_mov_b32 s49, 0x401c0000
+; GCN-NEXT:    s_mov_b32 s47, 0x40180000
+; GCN-NEXT:    s_mov_b32 s45, 0x40140000
+; GCN-NEXT:    s_mov_b32 s43, 0x40100000
+; GCN-NEXT:    s_mov_b32 s41, 0x40080000
+; GCN-NEXT:    s_mov_b32 s39, 2.0
+; GCN-NEXT:    s_mov_b32 s37, 0x3ff00000
+; GCN-NEXT:    s_mov_b32 s38, s36
+; GCN-NEXT:    s_mov_b32 s40, s36
+; GCN-NEXT:    s_mov_b32 s42, s36
+; GCN-NEXT:    s_mov_b32 s44, s36
+; GCN-NEXT:    s_mov_b32 s46, s36
+; GCN-NEXT:    s_mov_b32 s48, s36
+; GCN-NEXT:    s_mov_b32 s50, s36
+; GCN-NEXT:    s_mov_b32 s52, s36
+; GCN-NEXT:    s_mov_b32 s54, s36
+; GCN-NEXT:    s_mov_b32 s56, s36
+; GCN-NEXT:    s_mov_b32 s58, s36
+; GCN-NEXT:    s_mov_b32 s60, s36
+; GCN-NEXT:    s_mov_b32 s62, s36
+; GCN-NEXT:    s_mov_b32 s64, s36
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s2, s4, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s36
+; GCN-NEXT:    v_mov_b32_e32 v1, s37
+; GCN-NEXT:    v_mov_b32_e32 v31, s67
+; GCN-NEXT:    s_mov_b32 m0, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NEXT:    v_mov_b32_e32 v3, s39
+; GCN-NEXT:    v_mov_b32_e32 v4, s40
+; GCN-NEXT:    v_mov_b32_e32 v5, s41
+; GCN-NEXT:    v_mov_b32_e32 v6, s42
+; GCN-NEXT:    v_mov_b32_e32 v7, s43
+; GCN-NEXT:    v_mov_b32_e32 v8, s44
+; GCN-NEXT:    v_mov_b32_e32 v9, s45
+; GCN-NEXT:    v_mov_b32_e32 v10, s46
+; GCN-NEXT:    v_mov_b32_e32 v11, s47
+; GCN-NEXT:    v_mov_b32_e32 v12, s48
+; GCN-NEXT:    v_mov_b32_e32 v13, s49
+; GCN-NEXT:    v_mov_b32_e32 v14, s50
+; GCN-NEXT:    v_mov_b32_e32 v15, s51
+; GCN-NEXT:    v_mov_b32_e32 v16, s52
+; GCN-NEXT:    v_mov_b32_e32 v17, s53
+; GCN-NEXT:    v_mov_b32_e32 v18, s54
+; GCN-NEXT:    v_mov_b32_e32 v19, s55
+; GCN-NEXT:    v_mov_b32_e32 v20, s56
+; GCN-NEXT:    v_mov_b32_e32 v21, s57
+; GCN-NEXT:    v_mov_b32_e32 v22, s58
+; GCN-NEXT:    v_mov_b32_e32 v23, s59
+; GCN-NEXT:    v_mov_b32_e32 v24, s60
+; GCN-NEXT:    v_mov_b32_e32 v25, s61
+; GCN-NEXT:    v_mov_b32_e32 v26, s62
+; GCN-NEXT:    v_mov_b32_e32 v27, s63
+; GCN-NEXT:    v_mov_b32_e32 v28, s64
+; GCN-NEXT:    v_mov_b32_e32 v29, s65
+; GCN-NEXT:    v_mov_b32_e32 v30, s66
+; GCN-NEXT:    v_movrels_b32_e32 v32, v1
+; GCN-NEXT:    v_movrels_b32_e32 v31, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dwordx2 v[0:1], v[31:32]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <15 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0, double 9.0, double 10.0, double 11.0, double 12.0, double 13.0, double 14.0, double 15.0>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}double16_extelt:
-; GCN-NOT: buffer_
-; GCN-NOT: s_or_b32
-; GCN-DAG: s_mov_b32 [[ZERO:s[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v[[#BASE:]], [[ZERO]]
-; GCN-DAG: s_mov_b32 m0, [[IND:s[0-9]+]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_LO:[0-9]+]], v[[#BASE]]
-; GCN-DAG: v_movrels_b32_e32 v[[RES_HI:[0-9]+]], v[[#BASE+1]]
-; GCN:     store_dwordx2 v[{{[0-9:]+}}], v[[[RES_LO]]:[[RES_HI]]]
 define amdgpu_kernel void @double16_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: double16_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s36, 0
+; GCN-NEXT:    s_mov_b32 s67, 0x40300000
+; GCN-NEXT:    s_mov_b32 s65, 0x402e0000
+; GCN-NEXT:    s_mov_b32 s63, 0x402c0000
+; GCN-NEXT:    s_mov_b32 s61, 0x402a0000
+; GCN-NEXT:    s_mov_b32 s59, 0x40280000
+; GCN-NEXT:    s_mov_b32 s57, 0x40260000
+; GCN-NEXT:    s_mov_b32 s55, 0x40240000
+; GCN-NEXT:    s_mov_b32 s53, 0x40220000
+; GCN-NEXT:    s_mov_b32 s51, 0x40200000
+; GCN-NEXT:    s_mov_b32 s49, 0x401c0000
+; GCN-NEXT:    s_mov_b32 s47, 0x40180000
+; GCN-NEXT:    s_mov_b32 s45, 0x40140000
+; GCN-NEXT:    s_mov_b32 s43, 0x40100000
+; GCN-NEXT:    s_mov_b32 s41, 0x40080000
+; GCN-NEXT:    s_mov_b32 s39, 2.0
+; GCN-NEXT:    s_mov_b32 s37, 0x3ff00000
+; GCN-NEXT:    s_mov_b32 s38, s36
+; GCN-NEXT:    s_mov_b32 s40, s36
+; GCN-NEXT:    s_mov_b32 s42, s36
+; GCN-NEXT:    s_mov_b32 s44, s36
+; GCN-NEXT:    s_mov_b32 s46, s36
+; GCN-NEXT:    s_mov_b32 s48, s36
+; GCN-NEXT:    s_mov_b32 s50, s36
+; GCN-NEXT:    s_mov_b32 s52, s36
+; GCN-NEXT:    s_mov_b32 s54, s36
+; GCN-NEXT:    s_mov_b32 s56, s36
+; GCN-NEXT:    s_mov_b32 s58, s36
+; GCN-NEXT:    s_mov_b32 s60, s36
+; GCN-NEXT:    s_mov_b32 s62, s36
+; GCN-NEXT:    s_mov_b32 s64, s36
+; GCN-NEXT:    s_mov_b32 s66, s36
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s2, s4, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s36
+; GCN-NEXT:    v_mov_b32_e32 v1, s37
+; GCN-NEXT:    v_mov_b32_e32 v31, s67
+; GCN-NEXT:    s_mov_b32 m0, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NEXT:    v_mov_b32_e32 v3, s39
+; GCN-NEXT:    v_mov_b32_e32 v4, s40
+; GCN-NEXT:    v_mov_b32_e32 v5, s41
+; GCN-NEXT:    v_mov_b32_e32 v6, s42
+; GCN-NEXT:    v_mov_b32_e32 v7, s43
+; GCN-NEXT:    v_mov_b32_e32 v8, s44
+; GCN-NEXT:    v_mov_b32_e32 v9, s45
+; GCN-NEXT:    v_mov_b32_e32 v10, s46
+; GCN-NEXT:    v_mov_b32_e32 v11, s47
+; GCN-NEXT:    v_mov_b32_e32 v12, s48
+; GCN-NEXT:    v_mov_b32_e32 v13, s49
+; GCN-NEXT:    v_mov_b32_e32 v14, s50
+; GCN-NEXT:    v_mov_b32_e32 v15, s51
+; GCN-NEXT:    v_mov_b32_e32 v16, s52
+; GCN-NEXT:    v_mov_b32_e32 v17, s53
+; GCN-NEXT:    v_mov_b32_e32 v18, s54
+; GCN-NEXT:    v_mov_b32_e32 v19, s55
+; GCN-NEXT:    v_mov_b32_e32 v20, s56
+; GCN-NEXT:    v_mov_b32_e32 v21, s57
+; GCN-NEXT:    v_mov_b32_e32 v22, s58
+; GCN-NEXT:    v_mov_b32_e32 v23, s59
+; GCN-NEXT:    v_mov_b32_e32 v24, s60
+; GCN-NEXT:    v_mov_b32_e32 v25, s61
+; GCN-NEXT:    v_mov_b32_e32 v26, s62
+; GCN-NEXT:    v_mov_b32_e32 v27, s63
+; GCN-NEXT:    v_mov_b32_e32 v28, s64
+; GCN-NEXT:    v_mov_b32_e32 v29, s65
+; GCN-NEXT:    v_mov_b32_e32 v30, s66
+; GCN-NEXT:    v_movrels_b32_e32 v32, v1
+; GCN-NEXT:    v_movrels_b32_e32 v31, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dwordx2 v[0:1], v[31:32]
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <16 x double> <double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0, double 7.0, double 8.0, double 9.0, double 10.0, double 11.0, double 12.0, double 13.0, double 14.0, double 15.0, double 16.0>, i32 %sel
   store double %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}float32_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_mov_b32 m0,
-; GCN-DAG: v_mov_b32_e32 [[VLO:v[0-9]+]], 1.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40a00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40c00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40e00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41000000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41100000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41200000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41300000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41400000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41500000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41600000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41700000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41800000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41880000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41980000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41c00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41c80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41d00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41d80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41e00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41e80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41f00000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41f80000
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x42000000
-; GCN-DAG: v_movrels_b32_e32 [[RES:v[0-9]+]], [[VLO]]
-; GCN:     store_dword v[{{[0-9:]+}}], [[RES]]
 define amdgpu_kernel void @float32_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: float32_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v1, 2.0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 m0, s4
+; GCN-NEXT:    v_mov_b32_e32 v3, 4.0
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x40c00000
+; GCN-NEXT:    v_mov_b32_e32 v6, 0x40e00000
+; GCN-NEXT:    v_mov_b32_e32 v7, 0x41000000
+; GCN-NEXT:    v_mov_b32_e32 v8, 0x41100000
+; GCN-NEXT:    v_mov_b32_e32 v9, 0x41200000
+; GCN-NEXT:    v_mov_b32_e32 v10, 0x41300000
+; GCN-NEXT:    v_mov_b32_e32 v11, 0x41400000
+; GCN-NEXT:    v_mov_b32_e32 v12, 0x41500000
+; GCN-NEXT:    v_mov_b32_e32 v13, 0x41600000
+; GCN-NEXT:    v_mov_b32_e32 v14, 0x41700000
+; GCN-NEXT:    v_mov_b32_e32 v15, 0x41800000
+; GCN-NEXT:    v_mov_b32_e32 v16, 0x41880000
+; GCN-NEXT:    v_mov_b32_e32 v17, 0x41900000
+; GCN-NEXT:    v_mov_b32_e32 v18, 0x41980000
+; GCN-NEXT:    v_mov_b32_e32 v19, 0x41a00000
+; GCN-NEXT:    v_mov_b32_e32 v20, 0x41a80000
+; GCN-NEXT:    v_mov_b32_e32 v21, 0x41b00000
+; GCN-NEXT:    v_mov_b32_e32 v22, 0x41b80000
+; GCN-NEXT:    v_mov_b32_e32 v23, 0x41c00000
+; GCN-NEXT:    v_mov_b32_e32 v24, 0x41c80000
+; GCN-NEXT:    v_mov_b32_e32 v25, 0x41d00000
+; GCN-NEXT:    v_mov_b32_e32 v26, 0x41d80000
+; GCN-NEXT:    v_mov_b32_e32 v27, 0x41e00000
+; GCN-NEXT:    v_mov_b32_e32 v28, 0x41e80000
+; GCN-NEXT:    v_mov_b32_e32 v29, 0x41f00000
+; GCN-NEXT:    v_mov_b32_e32 v30, 0x41f80000
+; GCN-NEXT:    v_mov_b32_e32 v31, 0x42000000
+; GCN-NEXT:    v_movrels_b32_e32 v2, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <32 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0, float 17.0, float 18.0, float 19.0, float 20.0, float 21.0, float 22.0, float 23.0, float 24.0, float 25.0, float 26.0, float 27.0, float 28.0, float 29.0, float 30.0, float 31.0, float 32.0>, i32 %sel
   store float %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}byte8_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_mov_b32 s[[SL:[0-9]+]], 0x4030201
-; GCN-DAG: s_mov_b32 s[[SH:[0-9]+]], 0x8070605
-; GCN-DAG: s_lshl_b32 [[SEL:s[0-p]+]], s{{[0-9]+}}, 3
-; GCN:     s_lshr_b64 s[[[RL:[0-9]+]]:{{[0-9]+}}], s[[[SL]]:[[SH]]], [[SEL]]
-; GCN-DAG: v_mov_b32_e32 v[[VRL:[0-9]+]], s[[RL]]
-; GCN:     store_byte v[{{[0-9:]+}}], v[[VRL]]
 define amdgpu_kernel void @byte8_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: byte8_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_mov_b32 s2, 0x4030201
+; GCN-NEXT:    s_mov_b32 s3, 0x8070605
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshl_b32 s4, s4, 3
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    flat_store_byte v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <8 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i32 %sel
   store i8 %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}byte16_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 8
-; GCN-DAG: s_cselect_b64 [[C8:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 9
-; GCN-DAG: s_cselect_b64 [[C9:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 10
-; GCN-DAG: s_cselect_b64 [[C10:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 11
-; GCN-DAG: s_cselect_b64 [[C11:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 12
-; GCN-DAG: s_cselect_b64 [[C12:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 13
-; GCN-DAG: s_cselect_b64 [[C13:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 14
-; GCN-DAG: s_cselect_b64 [[C14:[^,]+]], -1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 15
-; GCN-DAG: s_cselect_b64 [[C15:[^,]+]], -1, 0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V4:v[0-9]+]], {{[^,]+}}, [[V3]], [[C4]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V5:v[0-9]+]], {{[^,]+}}, [[V4]], [[C5]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V6:v[0-9]+]], {{[^,]+}}, [[V5]], [[C6]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V7:v[0-9]+]], {{[^,]+}}, [[V6]], [[C7]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V8:v[0-9]+]], {{[^,]+}}, [[V7]], [[C8]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V9:v[0-9]+]], {{[^,]+}}, [[V8]], [[C8]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V10:v[0-9]+]], {{[^,]+}}, [[V9]], [[C10]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V11:v[0-9]+]], {{[^,]+}}, [[V10]], [[C11]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V12:v[0-9]+]], {{[^,]+}}, [[V11]], [[C12]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V13:v[0-9]+]], {{[^,]+}}, [[V12]], [[C13]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V14:v[0-9]+]], {{[^,]+}}, [[V13]], [[C14]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V15:v[0-9]+]], {{[^,]+}}, [[V14]], [[C15]]
-; GCN:     store_byte v[{{[0-9:]+}}], [[V15]]
 define amdgpu_kernel void @byte16_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: byte16_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 1, 2, s[2:3]
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 3, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 4, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 5
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 6, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 7
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 7, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 8
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 8, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 9
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 9, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 10
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 10, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 11
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 11, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 12
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 12, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 13
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 13, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 14
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 14, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 15
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 15, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, 16, v0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_byte v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, i32 %sel
   store i8 %ext, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}bit4_extelt:
+define amdgpu_kernel void @bit4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: bit4_extelt:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
@@ -421,7 +735,7 @@ entry:
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
 ; GCN-NEXT:    flat_store_dword v[0:1], v2
-define amdgpu_kernel void @bit4_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <4 x i1> <i1 0, i1 1, i1 0, i1 1>, i32 %sel
   %zext = zext i1 %ext to i32
@@ -429,15 +743,398 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}bit128_extelt:
-; GCN-NOT: buffer_
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1
-; GCN: s_cmpk_lg_i32 {{s[0-9]+}}, 0x7f
-; GCN: s_cselect_b64 [[CL:[^,]+]], -1, 0
-; GCN: v_cndmask_b32_e{{32|64}} [[VL:v[0-9]+]], 0, [[V1]], [[CL]]
-; GCN:     v_and_b32_e32 [[RES:v[0-9]+]], 1, [[VL]]
-; GCN:     store_dword v[{{[0-9:]+}}], [[RES]]
 define amdgpu_kernel void @bit128_extelt(ptr addrspace(1) %out, i32 %sel) {
+; GCN-LABEL: bit128_extelt:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_lg_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 2
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 3
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 4
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 5
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 6
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 7
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 8
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 9
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 10
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 11
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 12
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 13
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 14
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 15
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 16
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 17
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 18
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 19
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 20
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 21
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 22
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 23
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 24
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 25
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 26
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 27
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 28
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 29
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 30
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 31
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 32
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 33
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 34
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 35
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 36
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 37
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 38
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 39
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 40
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 41
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 42
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 43
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 44
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 45
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 46
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 47
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 48
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 49
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 50
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 51
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 52
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 53
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 54
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 55
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 56
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 57
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 58
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 59
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 60
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 61
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 62
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 63
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 64
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x41
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x42
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x43
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x44
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x45
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x46
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x47
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x48
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x49
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4a
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4b
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4c
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4d
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4e
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x4f
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x50
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x51
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x52
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x53
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x54
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x55
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x56
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x57
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x58
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x59
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5a
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5b
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5c
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5d
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5e
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x5f
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x60
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x61
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x62
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x63
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x64
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x65
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x66
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x67
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x68
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x69
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6a
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6b
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6c
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6d
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6e
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x6f
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x70
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x71
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x72
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x73
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x74
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x75
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x76
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x77
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x78
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x79
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7a
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7b
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7c
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7d
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7e
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmpk_lg_i32 s4, 0x7f
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 1, v0, vcc
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    v_and_b32_e32 v2, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
 entry:
   %ext = extractelement <128 x i1> <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>, i32 %sel
   %zext = zext i1 %ext to i32
@@ -445,29 +1142,177 @@ entry:
   ret void
 }
 
-; GCN-LABEL: {{^}}float32_extelt_vec:
-; GCN-NOT: buffer_
-; GCN-DAG: v_cmp_eq_u32_e{{32|64}} [[CC1:[^,]+]], 1, v0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 1.0, 2.0, [[CC1]]
-; GCN-DAG: v_mov_b32_e32 [[LASTVAL:v[0-9]+]], 0x42000000
-; GCN-DAG: v_cmp_ne_u32_e32 [[LASTCC:[^,]+]], 31, v0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} v0, [[LASTVAL]], v{{[0-9]+}}, [[LASTCC]]
 define float @float32_extelt_vec(i32 %sel) {
+; GCN-LABEL: float32_extelt_vec:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v1, 1.0, 2.0, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 2, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 3, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, 4.0, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 4, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40c00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 5, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x40e00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 6, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41000000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 7, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41100000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 8, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41200000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 9, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41300000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 10, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41400000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 11, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41500000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 12, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41600000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 13, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41700000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 14, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41800000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 15, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41880000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 16, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41900000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 17, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41980000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 18, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41a00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 19, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41a80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 20, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41b00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 21, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41b80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 22, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41c00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 23, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41c80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 24, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41d00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 25, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41d80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 26, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41e00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 27, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41e80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 28, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41f00000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 29, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x41f80000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 30, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, 0x42000000
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 31, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %ext = extractelement <32 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0, float 17.0, float 18.0, float 19.0, float 20.0, float 21.0, float 22.0, float 23.0, float 24.0, float 25.0, float 26.0, float 27.0, float 28.0, float 29.0, float 30.0, float 31.0, float 32.0>, i32 %sel
   ret float %ext
 }
 
-; GCN-LABEL: {{^}}double16_extelt_vec:
-; GCN-NOT: buffer_
-; GCN-DAG: v_mov_b32_e32 [[V1HI:v[0-9]+]], 0x3ff19999
-; GCN-DAG: v_mov_b32_e32 [[V1LO:v[0-9]+]], 0x9999999a
-; GCN-DAG: v_mov_b32_e32 [[V2HI:v[0-9]+]], 0x4000cccc
-; GCN-DAG: v_mov_b32_e32 [[V2LO:v[0-9]+]], 0xcccccccd
-; GCN-DAG: v_cmp_eq_u32_e{{32|64}} [[CC1:[^,]+]], 1, v0
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[R1HI:v[0-9]+]], [[V1HI]], [[V2HI]], [[CC1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[R1LO:v[0-9]+]], [[V1LO]], [[V2LO]], [[CC1]]
 define double @double16_extelt_vec(i32 %sel) {
+; GCN-LABEL: double16_extelt_vec:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v3, 0x3ff19999
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x4000cccc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 2, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x9999999a
+; GCN-NEXT:    v_mov_b32_e32 v2, 0xcccccccd
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x4008cccc
+; GCN-NEXT:    s_or_b64 vcc, s[4:5], vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v1, v2, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40106666
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40146666
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 4, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GCN-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40186666
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x401c6666
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x66666666
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GCN-NEXT:    s_or_b64 vcc, vcc, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40203333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40223333
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 8, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[4:5]
+; GCN-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40243333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 9, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40263333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 10, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x40283333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 11, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x402a3333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 12, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x402c3333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 13, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
+; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v5, 0x402e3333
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 14, v0
+; GCN-NEXT:    v_mov_b32_e32 v4, 0x33333333
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GCN-NEXT:    s_or_b64 vcc, vcc, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 15, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x40301999
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %ext = extractelement <16 x double> <double 1.1, double 2.1, double 3.1, double 4.1, double 5.1, double 6.1, double 7.1, double 8.1, double 9.1, double 10.1, double 11.1, double 12.1, double 13.1, double 14.1, double 15.1, double 16.1>, i32 %sel
   ret double %ext

diff  --git a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
index 164352ef75b3b9..e831da036acc7c 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
@@ -1,25 +1,73 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,SI %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,VI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefixes=SI %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s
 
-; GCN-LABEL: {{^}}extract_vector_elt_v1i8:
-; GCN: s_load_dword [[LOAD:s[0-9]+]]
-; GCN: v_mov_b32_e32 [[V_LOAD:v[0-9]+]], [[LOAD]]
-; GCN: buffer_store_byte [[V_LOAD]]
 define amdgpu_kernel void @extract_vector_elt_v1i8(ptr addrspace(1) %out, <1 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v1i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v1i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x8
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <1 x i8> %foo, i32 0
   store i8 %p0, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v2i8:
-; GCN: s_load_dword s
-; GCN-NOT: {{flat|buffer|global}}
-; SI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
-; VI: v_lshrrev_b16_e64 v{{[0-9]+}}, 8, s{{[0-9]+}}
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
 define amdgpu_kernel void @extract_vector_elt_v2i8(ptr addrspace(1) %out, <2 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v2i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s3, s2, 8
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    s_add_u32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    s_addc_u32 s1, s1, 0
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v2i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_lshrrev_b16_e64 v3, 8, s2
+; VI-NEXT:    s_add_u32 s2, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_addc_u32 s3, s1, 0
+; VI-NEXT:    flat_store_byte v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <2 x i8> %foo, i32 0
   %p1 = extractelement <2 x i8> %foo, i32 1
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -28,14 +76,46 @@ define amdgpu_kernel void @extract_vector_elt_v2i8(ptr addrspace(1) %out, <2 x i
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v3i8:
-; GCN: s_load_dword s
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
 define amdgpu_kernel void @extract_vector_elt_v3i8(ptr addrspace(1) %out, <3 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v3i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s3, s2, 16
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    s_add_u32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    s_addc_u32 s1, s1, 0
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v3i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s3, s2, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_add_u32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <3 x i8> %foo, i32 0
   %p1 = extractelement <3 x i8> %foo, i32 2
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -44,14 +124,46 @@ define amdgpu_kernel void @extract_vector_elt_v3i8(ptr addrspace(1) %out, <3 x i
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v4i8:
-; GCN: s_load_dword s
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
 define amdgpu_kernel void @extract_vector_elt_v4i8(ptr addrspace(1) %out, <4 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v4i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s3, s2, 16
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    s_add_u32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    s_addc_u32 s1, s1, 0
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v4i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x8
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s3, s2, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_add_u32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <4 x i8> %foo, i32 0
   %p1 = extractelement <4 x i8> %foo, i32 2
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -60,15 +172,36 @@ define amdgpu_kernel void @extract_vector_elt_v4i8(ptr addrspace(1) %out, <4 x i
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v8i8:
-; GCN-NOT: {{s|flat|buffer|global}}_load
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN-NOT: {{s|flat|buffer|global}}_load
-; GCN: s_lshr_b32 s{{[0-9]+}}, [[VAL]], 16
-; GCN-NOT: {{s|flat|buffer|global}}_load
-; GCN: buffer_store_byte
-; GCN: buffer_store_byte
 define amdgpu_kernel void @extract_vector_elt_v8i8(<8 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v8i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s0, s[6:7], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s1, s0, 16
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v8i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s0, s[6:7], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s1, s0, 16
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_byte v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <8 x i8> %foo, i32 0
   %p1 = extractelement <8 x i8> %foo, i32 2
   store volatile i8 %p1, ptr addrspace(1) null
@@ -76,15 +209,46 @@ define amdgpu_kernel void @extract_vector_elt_v8i8(<8 x i8> %foo) #0 {
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v16i8:
-; GCN: s_load_dword [[LOAD0:s[0-9]+]]
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: s_lshr_b32 [[ELT2:s[0-9]+]], [[LOAD0]], 16
-; GCN-DAG: v_mov_b32_e32 [[V_LOAD0:v[0-9]+]], [[LOAD0]]
-; GCN-DAG: v_mov_b32_e32 [[V_ELT2:v[0-9]+]], [[ELT2]]
-; GCN: buffer_store_byte [[V_ELT2]]
-; GCN: buffer_store_byte [[V_LOAD0]]
 define amdgpu_kernel void @extract_vector_elt_v16i8(ptr addrspace(1) %out, <16 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v16i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x4
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s3, s2, 16
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    s_add_u32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    s_addc_u32 s1, s1, 0
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v16i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x10
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s3, s2, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_add_u32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <16 x i8> %foo, i32 0
   %p1 = extractelement <16 x i8> %foo, i32 2
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -93,16 +257,36 @@ define amdgpu_kernel void @extract_vector_elt_v16i8(ptr addrspace(1) %out, <16 x
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v32i8:
-; GCN-NOT: {{s|flat|buffer|global}}_load
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN-NOT: {{s|flat|buffer|global}}_load
-; GCN: s_lshr_b32 [[ELT2:s[0-9]+]], [[VAL]], 16
-; GCN-DAG: v_mov_b32_e32 [[V_LOAD0:v[0-9]+]], s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 [[V_ELT2:v[0-9]+]], [[ELT2]]
-; GCN: buffer_store_byte [[V_ELT2]]
-; GCN: buffer_store_byte [[V_LOAD0]]
 define amdgpu_kernel void @extract_vector_elt_v32i8(<32 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v32i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s0, s[6:7], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s1, s0, 16
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v32i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s0, s[6:7], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s1, s0, 16
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_byte v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <32 x i8> %foo, i32 0
   %p1 = extractelement <32 x i8> %foo, i32 2
   store volatile i8 %p1, ptr addrspace(1) null
@@ -110,15 +294,46 @@ define amdgpu_kernel void @extract_vector_elt_v32i8(<32 x i8> %foo) #0 {
   ret void
 }
 
-; GCN-LABEL: {{^}}extract_vector_elt_v64i8:
-; GCN: s_load_dword [[LOAD0:s[0-9]+]]
-; GCN-NOT: {{flat|buffer|global}}
-; GCN: s_lshr_b32 [[ELT2:s[0-9]+]], [[LOAD0]], 16
-; GCN-DAG: v_mov_b32_e32 [[V_LOAD0:v[0-9]+]], [[LOAD0]]
-; GCN-DAG: v_mov_b32_e32 [[V_ELT2:v[0-9]+]], [[ELT2]]
-; GCN: buffer_store_byte [[V_ELT2]]
-; GCN: buffer_store_byte [[V_LOAD0]]
 define amdgpu_kernel void @extract_vector_elt_v64i8(ptr addrspace(1) %out, <64 x i8> %foo) #0 {
+; SI-LABEL: extract_vector_elt_v64i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x10
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s3, s2, 16
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    s_add_u32 s0, s0, 1
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    s_addc_u32 s1, s1, 0
+; SI-NEXT:    flat_store_byte v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: extract_vector_elt_v64i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x40
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s3, s2, 16
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_add_u32 s0, s0, 1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <64 x i8> %foo, i32 0
   %p1 = extractelement <64 x i8> %foo, i32 2
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -132,45 +347,110 @@ define amdgpu_kernel void @extract_vector_elt_v64i8(ptr addrspace(1) %out, <64 x
 ; FIXME: 16-bit and 32-bit shift not combined after legalize due to
 ; isTypeDesirableForOp in SimplifyDemandedBits
 
-; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v2i8:
-; VI: s_load_dword [[IDX:s[0-9]+]], s[6:7], 0x4c
-; VI-NEXT: s_load_dword [[LOAD:s[0-9]+]], s[6:7], 0x28
-; VI-NOT: {{flat|buffer|global}}
-; VI-DAG: v_mov_b32_e32 [[V_LOAD:v[0-9]+]], [[LOAD]]
-; VI-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: v_lshrrev_b16_e32 [[ELT:v[0-9]+]], [[SCALED_IDX]], [[V_LOAD]]
-; VI: buffer_store_byte [[ELT]]
 define amdgpu_kernel void @dynamic_extract_vector_elt_v2i8(ptr addrspace(1) %out, [8 x i32], <2 x i8> %foo, [8 x i32], i32 %idx) #0 {
+; SI-LABEL: dynamic_extract_vector_elt_v2i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[6:7], 0xa
+; SI-NEXT:    s_load_dword s3, s[6:7], 0x13
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s2, s2, 0xffff
+; SI-NEXT:    s_lshl_b32 s3, s3, 3
+; SI-NEXT:    s_lshr_b32 s2, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_extract_vector_elt_v2i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x4c
+; VI-NEXT:    s_load_dword s3, s[6:7], 0x28
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b32 s2, s2, 3
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_lshrrev_b16_e32 v2, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %elt = extractelement <2 x i8> %foo, i32 %idx
   store volatile i8 %elt, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v3i8:
-; VI: s_load_dword [[IDX:s[0-9]+]], s[6:7], 0x4c
-; VI-NEXT: s_load_dword [[LOAD:s[0-9]+]], s[6:7], 0x28
-; VI-NOT: {{flat|buffer|global}}
-; VI: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: s_lshr_b32 [[ELT:s[0-9]+]], [[LOAD]], [[SCALED_IDX]]
-; VI: v_mov_b32_e32 [[V_ELT:v[0-9]+]], [[ELT]]
-; VI: buffer_store_byte [[V_ELT]]
 define amdgpu_kernel void @dynamic_extract_vector_elt_v3i8(ptr addrspace(1) %out, [8 x i32], <3 x i8> %foo, [8 x i32], i32 %idx) #0 {
+; SI-LABEL: dynamic_extract_vector_elt_v3i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[6:7], 0x13
+; SI-NEXT:    s_load_dword s3, s[6:7], 0xa
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b32 s2, s2, 3
+; SI-NEXT:    s_lshr_b32 s2, s3, s2
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_extract_vector_elt_v3i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[6:7], 0x4c
+; VI-NEXT:    s_load_dword s3, s[6:7], 0x28
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b32 s2, s2, 3
+; VI-NEXT:    s_lshr_b32 s2, s3, s2
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %p0 = extractelement <3 x i8> %foo, i32 %idx
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
   store volatile i8 %p0, ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v4i8:
-; VI: s_load_dword [[IDX:s[0-9]+]], s[6:7], 0x30
-; VI: s_load_dword [[VEC4:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x0
-
-; VI: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: s_lshr_b32 [[EXTRACT:s[0-9]+]], [[VEC4]], [[SCALED_IDX]]
-
-; VI: v_mov_b32_e32 [[V_EXTRACT:v[0-9]+]], [[EXTRACT]]
-; VI: buffer_store_byte [[V_EXTRACT]]
 define amdgpu_kernel void @dynamic_extract_vector_elt_v4i8(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr, [8 x i32], i32 %idx) #0 {
+; SI-LABEL: dynamic_extract_vector_elt_v4i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s4, s[6:7], 0xc
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s2, s[2:3], 0x0
+; SI-NEXT:    s_lshl_b32 s3, s4, 3
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s2, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_extract_vector_elt_v4i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
+; VI-NEXT:    s_load_dword s4, s[6:7], 0x30
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[2:3], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_lshl_b32 s0, s4, 3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s0, s2, s0
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %vec = load <4 x i8>, ptr addrspace(4) %vec.ptr
   %p0 = extractelement <4 x i8> %vec, i32 %idx
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -178,15 +458,38 @@ define amdgpu_kernel void @dynamic_extract_vector_elt_v4i8(ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v8i8:
-; VI: s_load_dword [[IDX:s[0-9]+]], s[6:7], 0x10
-; VI: s_load_dwordx2 [[VEC8:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0x0
-
-; VI: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: s_lshr_b64 s[[[EXTRACT_LO:[0-9]+]]:{{[0-9]+\]}}, [[VEC8]], [[SCALED_IDX]]
-; VI: v_mov_b32_e32 [[V_EXTRACT:v[0-9]+]], s[[EXTRACT_LO]]
-; VI: buffer_store_byte [[V_EXTRACT]]
 define amdgpu_kernel void @dynamic_extract_vector_elt_v8i8(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr, i32 %idx) #0 {
+; SI-LABEL: dynamic_extract_vector_elt_v8i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
+; SI-NEXT:    s_load_dword s4, s[6:7], 0x4
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
+; SI-NEXT:    s_lshl_b32 s4, s4, 3
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b64 s[2:3], s[2:3], s4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    flat_store_byte v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_extract_vector_elt_v8i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
+; VI-NEXT:    s_load_dword s4, s[6:7], 0x10
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_lshl_b32 s0, s4, 3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s0
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_byte v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %vec = load <8 x i8>, ptr addrspace(4) %vec.ptr
   %p0 = extractelement <8 x i8> %vec, i32 %idx
   %out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
@@ -194,14 +497,50 @@ define amdgpu_kernel void @dynamic_extract_vector_elt_v8i8(ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}reduce_load_vector_v8i8_extract_0123:
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_load_dword s
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 24
 define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0123() #0 {
+; SI-LABEL: reduce_load_vector_v8i8_extract_0123:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s1, s0, 8
+; SI-NEXT:    s_lshr_b32 s2, s0, 16
+; SI-NEXT:    s_lshr_b32 s3, s0, 24
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: reduce_load_vector_v8i8_extract_0123:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s1, s0, 8
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_lshr_b32 s2, s0, 16
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    s_lshr_b32 s3, s0, 24
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %load = load <8 x i8>, ptr addrspace(4) null
   %elt0 = extractelement <8 x i8> %load, i32 0
   %elt1 = extractelement <8 x i8> %load, i32 1
@@ -214,13 +553,48 @@ define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0123() #0 {
   ret void
 }
 
-; GCN-LABEL: {{^}}reduce_load_vector_v8i8_extract_0145:
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_load_dwordx2
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
 define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0145() #0 {
+; SI-LABEL: reduce_load_vector_v8i8_extract_0145:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s2, s0, 8
+; SI-NEXT:    s_lshr_b32 s3, s1, 8
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    flat_store_byte v[0:1], v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: reduce_load_vector_v8i8_extract_0145:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s2, s0, 8
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshr_b32 s3, s1, 8
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_byte v[0:1], v1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %load = load <8 x i8>, ptr addrspace(4) null
   %elt0 = extractelement <8 x i8> %load, i32 0
   %elt1 = extractelement <8 x i8> %load, i32 1
@@ -233,13 +607,34 @@ define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0145() #0 {
   ret void
 }
 
-; GCN-LABEL: {{^}}reduce_load_vector_v8i8_extract_45:
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_mov_b64 [[PTR:s\[[0-9]+:[0-9]+\]]], 4{{$}}
-; GCN: s_load_dword s{{[0-9]+}}, [[PTR]], 0x0{{$}}
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
 define amdgpu_kernel void @reduce_load_vector_v8i8_extract_45() #0 {
+; SI-LABEL: reduce_load_vector_v8i8_extract_45:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_mov_b64 s[0:1], 4
+; SI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s1, s0, 8
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: reduce_load_vector_v8i8_extract_45:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_mov_b64 s[0:1], 4
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s1, s0, 8
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %load = load <8 x i8>, ptr addrspace(4) null
   %elt4 = extractelement <8 x i8> %load, i32 4
   %elt5 = extractelement <8 x i8> %load, i32 5
@@ -249,13 +644,48 @@ define amdgpu_kernel void @reduce_load_vector_v8i8_extract_45() #0 {
 }
 
 ; FIXME: ought to be able to eliminate high half of load
-; GCN-LABEL: {{^}}reduce_load_vector_v16i8_extract_0145:
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_load_dwordx4
-; GCN-NOT: {{s|buffer|flat|global}}_load_
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
-; GCN: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 8
 define amdgpu_kernel void @reduce_load_vector_v16i8_extract_0145() #0 {
+; SI-LABEL: reduce_load_vector_v16i8_extract_0145:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_mov_b64 s[0:1], 0
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s2, s0, 8
+; SI-NEXT:    s_lshr_b32 s3, s1, 8
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    flat_store_byte v[0:1], v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    flat_store_byte v[0:1], v0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: reduce_load_vector_v16i8_extract_0145:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_mov_b64 s[0:1], 0
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s2, s0, 8
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshr_b32 s3, s1, 8
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_byte v[0:1], v1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    flat_store_byte v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %load = load <16 x i8>, ptr addrspace(4) null
   %elt0 = extractelement <16 x i8> %load, i32 0
   %elt1 = extractelement <16 x i8> %load, i32 1

diff  --git a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
index 93e210bb4c8090..448fa7e959a51d 100644
--- a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
@@ -1,23 +1,71 @@
-; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI,CIVI,GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=CI,CIVI,GCN %s
-
-; GCN-LABEL: {{^}}s_abs_v2i16:
-; GFX9: s_load_dword [[VAL:s[0-9]+]]
-; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
-; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
-; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2 op_sel_hi:[1,0]
-
-; CIVI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
-; CIVI: s_sub_i32
-; CIVI: s_sub_i32
-; CIVI: s_max_i32
-; CIVI: s_max_i32
-; CIVI: s_add_i32
-; CIVI-DAG: s_add_i32
-; CIVI-DAG: s_and_b32
-; CIVI-DAG: s_or_b32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=CI %s
+
+
 define amdgpu_kernel void @s_abs_v2i16(ptr addrspace(1) %out, <2 x i16> %val) #0 {
+; GFX9-LABEL: s_abs_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v1, 0, s4
+; GFX9-NEXT:    v_pk_max_i16 v1, s4, v1
+; GFX9-NEXT:    v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: s_abs_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s2, s4, 16
+; VI-NEXT:    s_sub_i32 s3, 0, s4
+; VI-NEXT:    s_ashr_i32 s5, s4, 16
+; VI-NEXT:    s_sext_i32_i16 s4, s4
+; VI-NEXT:    s_sub_i32 s2, 0, s2
+; VI-NEXT:    s_sext_i32_i16 s3, s3
+; VI-NEXT:    s_sext_i32_i16 s2, s2
+; VI-NEXT:    s_max_i32 s3, s4, s3
+; VI-NEXT:    s_max_i32 s2, s5, s2
+; VI-NEXT:    s_add_i32 s3, s3, 2
+; VI-NEXT:    s_lshl_b32 s2, s2, 16
+; VI-NEXT:    s_and_b32 s3, s3, 0xffff
+; VI-NEXT:    s_or_b32 s2, s2, s3
+; VI-NEXT:    s_add_i32 s2, s2, 0x20000
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: s_abs_v2i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s4, s[2:3], 0xb
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s3, 0xf000
+; CI-NEXT:    s_mov_b32 s2, -1
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_ashr_i32 s5, s4, 16
+; CI-NEXT:    s_lshr_b32 s6, s4, 16
+; CI-NEXT:    s_sext_i32_i16 s7, s4
+; CI-NEXT:    s_sub_i32 s4, 0, s4
+; CI-NEXT:    s_sext_i32_i16 s4, s4
+; CI-NEXT:    s_sub_i32 s6, 0, s6
+; CI-NEXT:    s_sext_i32_i16 s6, s6
+; CI-NEXT:    s_max_i32 s4, s7, s4
+; CI-NEXT:    s_max_i32 s5, s5, s6
+; CI-NEXT:    s_add_i32 s4, s4, 2
+; CI-NEXT:    s_lshl_b32 s5, s5, 16
+; CI-NEXT:    s_and_b32 s4, s4, 0xffff
+; CI-NEXT:    s_or_b32 s4, s5, s4
+; CI-NEXT:    s_add_i32 s4, s4, 0x20000
+; CI-NEXT:    v_mov_b32_e32 v0, s4
+; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; CI-NEXT:    s_endpgm
   %neg = sub <2 x i16> zeroinitializer, %val
   %cond = icmp sgt <2 x i16> %val, %neg
   %res = select <2 x i1> %cond, <2 x i16> %val, <2 x i16> %neg
@@ -26,34 +74,73 @@ define amdgpu_kernel void @s_abs_v2i16(ptr addrspace(1) %out, <2 x i16> %val) #0
   ret void
 }
 
-; GCN-LABEL: {{^}}v_abs_v2i16:
-; GFX9: global_load_dword [[VAL:v[0-9]+]]
-; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
-; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
-; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2 op_sel_hi:[1,0]
-
-; VI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
-; VI-DAG: v_mov_b32_e32 [[TWO:v[0-9]+]], 2
-; VI-DAG: v_sub_u16_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
-; VI-DAG: v_sub_u16_sdwa v{{[0-9]+}}, [[ZERO]], v{{[0-9]+}}
-; VI-DAG: v_max_i16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI-DAG: v_max_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; VI: v_add_u16_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}
-; VI: v_add_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, [[TWO]]  dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NOT: v_and_b32
-; VI: v_or_b32_e32
-
-; CI: buffer_load_dword v
-; CI: v_lshrrev_b32_e32
-; CI-DAG: v_sub_i32_e32
-; CI-DAG: v_bfe_i32
-; CI-DAG: v_bfe_i32
-; CI-DAG: v_max_i32
-; CI-DAG: v_max_i32
-; CI-DAG: v_add_i32
-; CI-DAG: v_add_i32
-; CI-DAG: v_or_b32
 define amdgpu_kernel void @v_abs_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %src) #0 {
+; GFX9-LABEL: v_abs_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v1, v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v2, 0, v1
+; GFX9-NEXT:    v_pk_max_i16 v1, v1, v2
+; GFX9-NEXT:    v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dword v0, v1, s[4:5]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: v_abs_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0
+; VI-NEXT:    v_mov_b32_e32 v5, 2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_sub_u16_e32 v2, 0, v3
+; VI-NEXT:    v_sub_u16_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT:    v_max_i16_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_max_i16_e32 v2, v3, v2
+; VI-NEXT:    v_add_u16_e32 v2, 2, v2
+; VI-NEXT:    v_add_u16_sdwa v3, v4, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v2, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: v_abs_v2i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s7, 0xf000
+; CI-NEXT:    s_mov_b32 s6, 0
+; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_bfe_i32 v3, v2, 0, 16
+; CI-NEXT:    v_ashrrev_i32_e32 v4, 16, v2
+; CI-NEXT:    v_lshrrev_b32_e32 v5, 16, v2
+; CI-NEXT:    v_sub_i32_e32 v2, vcc, 0, v2
+; CI-NEXT:    v_bfe_i32 v2, v2, 0, 16
+; CI-NEXT:    v_sub_i32_e32 v5, vcc, 0, v5
+; CI-NEXT:    v_bfe_i32 v5, v5, 0, 16
+; CI-NEXT:    v_max_i32_e32 v2, v3, v2
+; CI-NEXT:    v_max_i32_e32 v3, v4, v5
+; CI-NEXT:    v_add_i32_e32 v2, vcc, 2, v2
+; CI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT:    v_or_b32_e32 v2, v3, v2
+; CI-NEXT:    v_add_i32_e32 v2, vcc, 0x20000, v2
+; CI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; CI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr inbounds <2 x i16>, ptr addrspace(1) %src, i32 %tid
   %gep.out = getelementptr inbounds <2 x i16>, ptr addrspace(1) %out, i32 %tid
@@ -66,12 +153,68 @@ define amdgpu_kernel void @v_abs_v2i16(ptr addrspace(1) %out, ptr addrspace(1) %
   ret void
 }
 
-; GCN-LABEL: {{^}}s_abs_v2i16_2:
-; GFX9: s_load_dword [[VAL:s[0-9]+]]
-; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
-; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
-; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2 op_sel_hi:[1,0]
 define amdgpu_kernel void @s_abs_v2i16_2(ptr addrspace(1) %out, <2 x i16> %val) #0 {
+; GFX9-LABEL: s_abs_v2i16_2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v1, 0, s4
+; GFX9-NEXT:    v_pk_max_i16 v1, s4, v1
+; GFX9-NEXT:    v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: s_abs_v2i16_2:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s4, s[2:3], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s2, s4, 16
+; VI-NEXT:    s_sub_i32 s3, 0, s4
+; VI-NEXT:    s_ashr_i32 s5, s4, 16
+; VI-NEXT:    s_sext_i32_i16 s4, s4
+; VI-NEXT:    s_sub_i32 s2, 0, s2
+; VI-NEXT:    s_sext_i32_i16 s3, s3
+; VI-NEXT:    s_sext_i32_i16 s2, s2
+; VI-NEXT:    s_max_i32 s3, s4, s3
+; VI-NEXT:    s_max_i32 s2, s5, s2
+; VI-NEXT:    s_add_i32 s3, s3, 2
+; VI-NEXT:    s_lshl_b32 s2, s2, 16
+; VI-NEXT:    s_and_b32 s3, s3, 0xffff
+; VI-NEXT:    s_or_b32 s2, s2, s3
+; VI-NEXT:    s_add_i32 s2, s2, 0x20000
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: s_abs_v2i16_2:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s4, s[2:3], 0xb
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s3, 0xf000
+; CI-NEXT:    s_mov_b32 s2, -1
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_ashr_i32 s5, s4, 16
+; CI-NEXT:    s_lshr_b32 s6, s4, 16
+; CI-NEXT:    s_sext_i32_i16 s7, s4
+; CI-NEXT:    s_sub_i32 s4, 0, s4
+; CI-NEXT:    s_sext_i32_i16 s4, s4
+; CI-NEXT:    s_sub_i32 s6, 0, s6
+; CI-NEXT:    s_sext_i32_i16 s6, s6
+; CI-NEXT:    s_max_i32 s4, s7, s4
+; CI-NEXT:    s_max_i32 s5, s5, s6
+; CI-NEXT:    s_add_i32 s4, s4, 2
+; CI-NEXT:    s_lshl_b32 s5, s5, 16
+; CI-NEXT:    s_and_b32 s4, s4, 0xffff
+; CI-NEXT:    s_or_b32 s4, s5, s4
+; CI-NEXT:    s_add_i32 s4, s4, 0x20000
+; CI-NEXT:    v_mov_b32_e32 v0, s4
+; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; CI-NEXT:    s_endpgm
   %z0 = insertelement <2 x i16> undef, i16 0, i16 0
   %z1 = insertelement <2 x i16> %z0, i16 0, i16 1
   %t0 = insertelement <2 x i16> undef, i16 2, i16 0
@@ -84,12 +227,76 @@ define amdgpu_kernel void @s_abs_v2i16_2(ptr addrspace(1) %out, <2 x i16> %val)
   ret void
 }
 
-; GCN-LABEL: {{^}}v_abs_v2i16_2:
-; GFX9: global_load_dword [[VAL:v[0-9]+]]
-; GFX9: v_pk_sub_i16 [[SUB:v[0-9]+]], 0, [[VAL]]
-; GFX9: v_pk_max_i16 [[MAX:v[0-9]+]], [[VAL]], [[SUB]]
-; GFX9: v_pk_add_u16 [[ADD:v[0-9]+]], [[MAX]], 2 op_sel_hi:[1,0]
 define amdgpu_kernel void @v_abs_v2i16_2(ptr addrspace(1) %out, ptr addrspace(1) %src) #0 {
+; GFX9-LABEL: v_abs_v2i16_2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v0, v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v2, 0, v0
+; GFX9-NEXT:    v_pk_max_i16 v0, v0, v2
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dword v1, v0, s[4:5]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: v_abs_v2i16_2:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    v_mov_b32_e32 v3, 0
+; VI-NEXT:    v_mov_b32_e32 v4, 2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v2, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_sub_u16_e32 v5, 0, v2
+; VI-NEXT:    v_sub_u16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT:    v_max_i16_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_max_i16_e32 v2, v2, v5
+; VI-NEXT:    v_add_u16_e32 v2, 2, v2
+; VI-NEXT:    v_add_u16_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v2, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: v_abs_v2i16_2:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s7, 0xf000
+; CI-NEXT:    s_mov_b32 s10, 0
+; CI-NEXT:    s_mov_b32 s11, s7
+; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
+; CI-NEXT:    s_mov_b32 s6, -1
+; CI-NEXT:    s_mov_b32 s4, s0
+; CI-NEXT:    s_mov_b32 s5, s1
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_bfe_i32 v1, v0, 0, 16
+; CI-NEXT:    v_ashrrev_i32_e32 v2, 16, v0
+; CI-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
+; CI-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; CI-NEXT:    v_sub_i32_e32 v3, vcc, 0, v3
+; CI-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; CI-NEXT:    v_max_i32_e32 v0, v1, v0
+; CI-NEXT:    v_max_i32_e32 v1, v2, v3
+; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; CI-NEXT:    v_or_b32_e32 v0, v1, v0
+; CI-NEXT:    v_add_i32_e32 v0, vcc, 0x20000, v0
+; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; CI-NEXT:    s_endpgm
   %z0 = insertelement <2 x i16> undef, i16 0, i16 0
   %z1 = insertelement <2 x i16> %z0, i16 0, i16 1
   %t0 = insertelement <2 x i16> undef, i16 2, i16 0
@@ -105,15 +312,100 @@ define amdgpu_kernel void @v_abs_v2i16_2(ptr addrspace(1) %out, ptr addrspace(1)
   ret void
 }
 
-; GCN-LABEL: {{^}}s_abs_v4i16:
-; GFX9: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s[2:3], 0x24
-; GFX9-DAG: v_pk_sub_i16 [[SUB0:v[0-9]+]], 0, s[[#LOAD + 2]]
-; GFX9-DAG: v_pk_sub_i16 [[SUB1:v[0-9]+]], 0, s[[#LOAD + 3]]
-; GFX9-DAG: v_pk_max_i16 [[MAX0:v[0-9]+]], s[[#LOAD + 2]], [[SUB0]]
-; GFX9-DAG: v_pk_max_i16 [[MAX1:v[0-9]+]], s[[#LOAD + 3]], [[SUB1]]
-; GFX9-DAG: v_pk_add_u16 [[ADD0:v[0-9]+]], [[MAX0]], 2 op_sel_hi:[1,0]
-; GFX9-DAG: v_pk_add_u16 [[ADD1:v[0-9]+]], [[MAX1]], 2 op_sel_hi:[1,0]
 define amdgpu_kernel void @s_abs_v4i16(ptr addrspace(1) %out, <4 x i16> %val) #0 {
+; GFX9-LABEL: s_abs_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v0, 0, s7
+; GFX9-NEXT:    v_pk_sub_i16 v1, 0, s6
+; GFX9-NEXT:    v_pk_max_i16 v3, s6, v1
+; GFX9-NEXT:    v_pk_max_i16 v0, s7, v0
+; GFX9-NEXT:    v_pk_add_u16 v1, v0, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_add_u16 v0, v3, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: s_abs_v4i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s4, s2, 16
+; VI-NEXT:    s_lshr_b32 s5, s3, 16
+; VI-NEXT:    s_sub_i32 s6, 0, s3
+; VI-NEXT:    s_sub_i32 s7, 0, s2
+; VI-NEXT:    s_sub_i32 s5, 0, s5
+; VI-NEXT:    s_sub_i32 s4, 0, s4
+; VI-NEXT:    s_ashr_i32 s8, s2, 16
+; VI-NEXT:    s_ashr_i32 s9, s3, 16
+; VI-NEXT:    s_sext_i32_i16 s2, s2
+; VI-NEXT:    s_sext_i32_i16 s3, s3
+; VI-NEXT:    s_sext_i32_i16 s7, s7
+; VI-NEXT:    s_sext_i32_i16 s6, s6
+; VI-NEXT:    s_sext_i32_i16 s4, s4
+; VI-NEXT:    s_sext_i32_i16 s5, s5
+; VI-NEXT:    s_max_i32 s3, s3, s6
+; VI-NEXT:    s_max_i32 s2, s2, s7
+; VI-NEXT:    s_max_i32 s5, s9, s5
+; VI-NEXT:    s_max_i32 s4, s8, s4
+; VI-NEXT:    s_add_i32 s2, s2, 2
+; VI-NEXT:    s_add_i32 s3, s3, 2
+; VI-NEXT:    s_lshl_b32 s4, s4, 16
+; VI-NEXT:    s_lshl_b32 s5, s5, 16
+; VI-NEXT:    s_and_b32 s3, s3, 0xffff
+; VI-NEXT:    s_and_b32 s2, s2, 0xffff
+; VI-NEXT:    s_or_b32 s3, s5, s3
+; VI-NEXT:    s_or_b32 s2, s4, s2
+; VI-NEXT:    s_add_i32 s3, s3, 0x20000
+; VI-NEXT:    s_add_i32 s2, s2, 0x20000
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: s_abs_v4i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s7, 0xf000
+; CI-NEXT:    s_mov_b32 s6, -1
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s4, s0
+; CI-NEXT:    s_mov_b32 s5, s1
+; CI-NEXT:    s_ashr_i64 s[0:1], s[2:3], 48
+; CI-NEXT:    s_ashr_i32 s1, s2, 16
+; CI-NEXT:    s_lshr_b32 s8, s2, 16
+; CI-NEXT:    s_lshr_b32 s9, s3, 16
+; CI-NEXT:    s_sext_i32_i16 s10, s3
+; CI-NEXT:    s_sext_i32_i16 s11, s2
+; CI-NEXT:    s_sub_i32 s3, 0, s3
+; CI-NEXT:    s_sub_i32 s2, 0, s2
+; CI-NEXT:    s_sext_i32_i16 s3, s3
+; CI-NEXT:    s_sext_i32_i16 s2, s2
+; CI-NEXT:    s_sub_i32 s9, 0, s9
+; CI-NEXT:    s_sub_i32 s8, 0, s8
+; CI-NEXT:    s_sext_i32_i16 s9, s9
+; CI-NEXT:    s_sext_i32_i16 s8, s8
+; CI-NEXT:    s_max_i32 s2, s11, s2
+; CI-NEXT:    s_max_i32 s3, s10, s3
+; CI-NEXT:    s_max_i32 s1, s1, s8
+; CI-NEXT:    s_max_i32 s0, s0, s9
+; CI-NEXT:    s_add_i32 s3, s3, 2
+; CI-NEXT:    s_add_i32 s2, s2, 2
+; CI-NEXT:    s_lshl_b32 s0, s0, 16
+; CI-NEXT:    s_and_b32 s3, s3, 0xffff
+; CI-NEXT:    s_lshl_b32 s1, s1, 16
+; CI-NEXT:    s_and_b32 s2, s2, 0xffff
+; CI-NEXT:    s_or_b32 s0, s0, s3
+; CI-NEXT:    s_or_b32 s1, s1, s2
+; CI-NEXT:    s_add_i32 s0, s0, 0x20000
+; CI-NEXT:    s_add_i32 s1, s1, 0x20000
+; CI-NEXT:    v_mov_b32_e32 v0, s1
+; CI-NEXT:    v_mov_b32_e32 v1, s0
+; CI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; CI-NEXT:    s_endpgm
   %z0 = insertelement <4 x i16> undef, i16 0, i16 0
   %z1 = insertelement <4 x i16> %z0, i16 0, i16 1
   %z2 = insertelement <4 x i16> %z1, i16 0, i16 2
@@ -130,17 +422,102 @@ define amdgpu_kernel void @s_abs_v4i16(ptr addrspace(1) %out, <4 x i16> %val) #0
   ret void
 }
 
-; GCN-LABEL: {{^}}v_abs_v4i16:
-; GFX9: global_load_dwordx2 v[[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]]
 
-; GFX9-DAG: v_pk_sub_i16 [[SUB0:v[0-9]+]], 0, v[[VAL0]]
-; GFX9-DAG: v_pk_max_i16 [[MAX0:v[0-9]+]], v[[VAL0]], [[SUB0]]
-; GFX9-DAG: v_pk_add_u16 [[ADD0:v[0-9]+]], [[MAX0]], 2 op_sel_hi:[1,0]
 
-; GFX9-DAG: v_pk_sub_i16 [[SUB1:v[0-9]+]], 0, v[[VAL1]]
-; GFX9-DAG: v_pk_max_i16 [[MAX1:v[0-9]+]], v[[VAL1]], [[SUB1]]
-; GFX9-DAG: v_pk_add_u16 [[ADD1:v[0-9]+]], [[MAX1]], 2 op_sel_hi:[1,0]
 define amdgpu_kernel void @v_abs_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %src) #0 {
+; GFX9-LABEL: v_abs_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v0, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_pk_sub_i16 v3, 0, v1
+; GFX9-NEXT:    v_pk_sub_i16 v4, 0, v0
+; GFX9-NEXT:    v_pk_max_i16 v0, v0, v4
+; GFX9-NEXT:    v_pk_max_i16 v1, v1, v3
+; GFX9-NEXT:    v_pk_add_u16 v1, v1, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, 2 op_sel_hi:[1,0]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: v_abs_v4i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0
+; VI-NEXT:    v_mov_b32_e32 v5, 2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_sub_u16_e32 v6, 0, v1
+; VI-NEXT:    v_sub_u16_sdwa v7, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT:    v_sub_u16_e32 v8, 0, v0
+; VI-NEXT:    v_sub_u16_sdwa v4, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT:    v_max_i16_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_max_i16_e32 v0, v0, v8
+; VI-NEXT:    v_max_i16_sdwa v7, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_max_i16_e32 v1, v1, v6
+; VI-NEXT:    v_add_u16_e32 v1, 2, v1
+; VI-NEXT:    v_add_u16_sdwa v6, v7, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_add_u16_e32 v0, 2, v0
+; VI-NEXT:    v_add_u16_sdwa v4, v4, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT:    v_or_b32_e32 v1, v1, v6
+; VI-NEXT:    v_or_b32_e32 v0, v0, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: v_abs_v4i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s3, 0xf000
+; CI-NEXT:    s_mov_b32 s10, 0
+; CI-NEXT:    s_mov_b32 s11, s3
+; CI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    buffer_load_dwordx2 v[0:1], v[0:1], s[8:11], 0 addr64
+; CI-NEXT:    s_mov_b32 s2, -1
+; CI-NEXT:    s_mov_b32 s0, s4
+; CI-NEXT:    s_mov_b32 s1, s5
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_ashr_i64 v[2:3], v[0:1], 48
+; CI-NEXT:    v_bfe_i32 v4, v1, 0, 16
+; CI-NEXT:    v_bfe_i32 v3, v0, 0, 16
+; CI-NEXT:    v_ashrrev_i32_e32 v5, 16, v0
+; CI-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; CI-NEXT:    v_lshrrev_b32_e32 v7, 16, v1
+; CI-NEXT:    v_sub_i32_e32 v1, vcc, 0, v1
+; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
+; CI-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; CI-NEXT:    v_sub_i32_e32 v7, vcc, 0, v7
+; CI-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; CI-NEXT:    v_sub_i32_e32 v6, vcc, 0, v6
+; CI-NEXT:    v_bfe_i32 v7, v7, 0, 16
+; CI-NEXT:    v_bfe_i32 v6, v6, 0, 16
+; CI-NEXT:    v_max_i32_e32 v0, v3, v0
+; CI-NEXT:    v_max_i32_e32 v1, v4, v1
+; CI-NEXT:    v_max_i32_e32 v3, v5, v6
+; CI-NEXT:    v_max_i32_e32 v2, v2, v7
+; CI-NEXT:    v_add_i32_e32 v1, vcc, 2, v1
+; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
+; CI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; CI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; CI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT:    v_or_b32_e32 v1, v2, v1
+; CI-NEXT:    v_or_b32_e32 v0, v3, v0
+; CI-NEXT:    v_add_i32_e32 v1, vcc, 0x20000, v1
+; CI-NEXT:    v_add_i32_e32 v0, vcc, 0x20000, v0
+; CI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; CI-NEXT:    s_endpgm
   %z0 = insertelement <4 x i16> undef, i16 0, i16 0
   %z1 = insertelement <4 x i16> %z0, i16 0, i16 1
   %z2 = insertelement <4 x i16> %z1, i16 0, i16 2
@@ -160,10 +537,87 @@ define amdgpu_kernel void @v_abs_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %
   ret void
 }
 
-; GCN-LABEL: {{^}}s_min_max_v2i16:
-; GFX9: v_pk_max_i16
-; GFX9: v_pk_min_i16
 define amdgpu_kernel void @s_min_max_v2i16(ptr addrspace(1) %out0, ptr addrspace(1) %out1, <2 x i16> %val0, <2 x i16> %val1) #0 {
+; GFX9-LABEL: s_min_max_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_pk_max_i16 v2, s0, v1
+; GFX9-NEXT:    v_pk_min_i16 v1, s0, v1
+; GFX9-NEXT:    global_store_dword v0, v2, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_store_dword v0, v1, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: s_min_max_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_ashr_i32 s2, s0, 16
+; VI-NEXT:    s_sext_i32_i16 s0, s0
+; VI-NEXT:    s_ashr_i32 s3, s1, 16
+; VI-NEXT:    s_sext_i32_i16 s1, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_max_i32 s4, s2, s3
+; VI-NEXT:    s_max_i32 s5, s0, s1
+; VI-NEXT:    s_lshl_b32 s4, s4, 16
+; VI-NEXT:    s_and_b32 s5, s5, 0xffff
+; VI-NEXT:    s_min_i32 s2, s2, s3
+; VI-NEXT:    s_min_i32 s0, s0, s1
+; VI-NEXT:    s_or_b32 s4, s5, s4
+; VI-NEXT:    s_lshl_b32 s1, s2, 16
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_or_b32 s0, s0, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    flat_store_dword v[2:3], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: s_min_max_v2i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x9
+; CI-NEXT:    s_load_dwordx2 s[12:13], s[2:3], 0xd
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s4
+; CI-NEXT:    s_mov_b32 s0, s6
+; CI-NEXT:    s_ashr_i32 s4, s12, 16
+; CI-NEXT:    s_ashr_i32 s6, s13, 16
+; CI-NEXT:    s_mov_b32 s9, s5
+; CI-NEXT:    s_mov_b32 s1, s7
+; CI-NEXT:    s_sext_i32_i16 s5, s12
+; CI-NEXT:    s_sext_i32_i16 s7, s13
+; CI-NEXT:    s_max_i32 s12, s4, s6
+; CI-NEXT:    s_max_i32 s13, s5, s7
+; CI-NEXT:    v_mov_b32_e32 v0, s12
+; CI-NEXT:    s_min_i32 s4, s4, s6
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s13
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    s_min_i32 s5, s5, s7
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s4
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s5
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_endpgm
   %cond0 = icmp sgt <2 x i16> %val0, %val1
   %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
   %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
@@ -173,10 +627,91 @@ define amdgpu_kernel void @s_min_max_v2i16(ptr addrspace(1) %out0, ptr addrspace
   ret void
 }
 
-; GCN-LABEL: {{^}}v_min_max_v2i16:
-; GFX9: v_pk_max_i16
-; GFX9: v_pk_min_i16
 define amdgpu_kernel void @v_min_max_v2i16(ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) #0 {
+; GFX9-LABEL: v_min_max_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v1, v0, s[8:9] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dword v2, v0, s[10:11] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_pk_max_i16 v3, v1, v2
+; GFX9-NEXT:    v_pk_min_i16 v1, v1, v2
+; GFX9-NEXT:    global_store_dword v0, v3, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_store_dword v0, v1, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: v_min_max_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v5, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_max_i32_sdwa v6, sext(v4), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; VI-NEXT:    v_max_i32_sdwa v7, sext(v4), sext(v5) dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT:    v_min_i32_sdwa v8, sext(v4), sext(v5) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
+; VI-NEXT:    v_min_i32_sdwa v4, sext(v4), sext(v5) dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT:    v_or_b32_sdwa v5, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v4, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dword v[0:1], v5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[2:3], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: v_min_max_v2i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx8 s[0:7], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s14, s10
+; CI-NEXT:    s_mov_b32 s15, s11
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s12, s4
+; CI-NEXT:    s_mov_b32 s13, s5
+; CI-NEXT:    s_mov_b32 s4, s6
+; CI-NEXT:    s_mov_b32 s5, s7
+; CI-NEXT:    s_mov_b32 s6, s10
+; CI-NEXT:    s_mov_b32 s7, s11
+; CI-NEXT:    buffer_load_sshort v0, off, s[12:15], 0 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v1, off, s[12:15], 0 offset:2 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v2, off, s[4:7], 0 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v3, off, s[4:7], 0 offset:2 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s0
+; CI-NEXT:    s_mov_b32 s9, s1
+; CI-NEXT:    s_mov_b32 s4, s2
+; CI-NEXT:    s_mov_b32 s5, s3
+; CI-NEXT:    v_max_i32_e32 v4, v0, v2
+; CI-NEXT:    v_max_i32_e32 v5, v1, v3
+; CI-NEXT:    v_min_i32_e32 v0, v0, v2
+; CI-NEXT:    v_min_i32_e32 v1, v1, v3
+; CI-NEXT:    buffer_store_short v5, off, s[8:11], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v4, off, s[8:11], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_endpgm
   %val0 = load volatile <2 x i16>, ptr addrspace(1) %ptr0
   %val1 = load volatile <2 x i16>, ptr addrspace(1) %ptr1
 
@@ -189,12 +724,123 @@ define amdgpu_kernel void @v_min_max_v2i16(ptr addrspace(1) %out0, ptr addrspace
   ret void
 }
 
-; GCN-LABEL: {{^}}s_min_max_v4i16:
-; GFX9-DAG: v_pk_max_i16
-; GFX9-DAG: v_pk_min_i16
-; GFX9-DAG: v_pk_max_i16
-; GFX9-DAG: v_pk_min_i16
 define amdgpu_kernel void @s_min_max_v4i16(ptr addrspace(1) %out0, ptr addrspace(1) %out1, <4 x i16> %val0, <4 x i16> %val1) #0 {
+; GFX9-LABEL: s_min_max_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s11
+; GFX9-NEXT:    v_mov_b32_e32 v5, s10
+; GFX9-NEXT:    v_pk_max_i16 v1, s9, v2
+; GFX9-NEXT:    v_pk_max_i16 v0, s8, v5
+; GFX9-NEXT:    v_pk_min_i16 v3, s9, v2
+; GFX9-NEXT:    v_pk_min_i16 v2, s8, v5
+; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: s_min_max_v4i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    s_ashr_i32 s0, s5, 16
+; VI-NEXT:    s_ashr_i32 s1, s4, 16
+; VI-NEXT:    s_sext_i32_i16 s2, s5
+; VI-NEXT:    s_sext_i32_i16 s3, s4
+; VI-NEXT:    s_ashr_i32 s4, s7, 16
+; VI-NEXT:    s_ashr_i32 s5, s6, 16
+; VI-NEXT:    s_sext_i32_i16 s7, s7
+; VI-NEXT:    s_sext_i32_i16 s6, s6
+; VI-NEXT:    s_max_i32 s8, s1, s5
+; VI-NEXT:    s_max_i32 s9, s0, s4
+; VI-NEXT:    s_max_i32 s10, s3, s6
+; VI-NEXT:    s_max_i32 s11, s2, s7
+; VI-NEXT:    s_min_i32 s0, s0, s4
+; VI-NEXT:    s_min_i32 s2, s2, s7
+; VI-NEXT:    s_lshl_b32 s9, s9, 16
+; VI-NEXT:    s_and_b32 s11, s11, 0xffff
+; VI-NEXT:    s_lshl_b32 s8, s8, 16
+; VI-NEXT:    s_and_b32 s10, s10, 0xffff
+; VI-NEXT:    s_min_i32 s1, s1, s5
+; VI-NEXT:    s_min_i32 s3, s3, s6
+; VI-NEXT:    s_lshl_b32 s0, s0, 16
+; VI-NEXT:    s_and_b32 s2, s2, 0xffff
+; VI-NEXT:    s_or_b32 s9, s11, s9
+; VI-NEXT:    s_or_b32 s8, s10, s8
+; VI-NEXT:    s_or_b32 s0, s2, s0
+; VI-NEXT:    s_lshl_b32 s1, s1, 16
+; VI-NEXT:    s_and_b32 s2, s3, 0xffff
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v5, s9
+; VI-NEXT:    s_or_b32 s1, s2, s1
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    v_mov_b32_e32 v7, s0
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[6:7]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: s_min_max_v4i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx8 s[0:7], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_ashr_i32 s12, s5, 16
+; CI-NEXT:    s_ashr_i32 s14, s7, 16
+; CI-NEXT:    s_sext_i32_i16 s5, s5
+; CI-NEXT:    s_sext_i32_i16 s7, s7
+; CI-NEXT:    s_max_i32 s17, s12, s14
+; CI-NEXT:    s_mov_b32 s8, s0
+; CI-NEXT:    s_mov_b32 s9, s1
+; CI-NEXT:    s_ashr_i32 s13, s4, 16
+; CI-NEXT:    s_ashr_i32 s15, s6, 16
+; CI-NEXT:    s_max_i32 s19, s5, s7
+; CI-NEXT:    v_mov_b32_e32 v0, s17
+; CI-NEXT:    s_sext_i32_i16 s4, s4
+; CI-NEXT:    s_sext_i32_i16 s6, s6
+; CI-NEXT:    s_max_i32 s16, s13, s15
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0 offset:6
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s19
+; CI-NEXT:    s_max_i32 s18, s4, s6
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0 offset:4
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s16
+; CI-NEXT:    s_min_i32 s12, s12, s14
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s18
+; CI-NEXT:    s_mov_b32 s0, s2
+; CI-NEXT:    s_mov_b32 s1, s3
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    s_min_i32 s5, s5, s7
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s12
+; CI-NEXT:    s_min_i32 s13, s13, s15
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:6
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s5
+; CI-NEXT:    s_min_i32 s4, s4, s6
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:4
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s13
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s4
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_endpgm
   %cond0 = icmp sgt <4 x i16> %val0, %val1
   %sel0 = select <4 x i1> %cond0, <4 x i16> %val0, <4 x i16> %val1
   %sel1 = select <4 x i1> %cond0, <4 x i16> %val1, <4 x i16> %val0
@@ -204,8 +850,142 @@ define amdgpu_kernel void @s_min_max_v4i16(ptr addrspace(1) %out0, ptr addrspace
   ret void
 }
 
-; GCN-LABEL: {{^}}v_min_max_v2i16_user:
 define amdgpu_kernel void @v_min_max_v2i16_user(ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) #0 {
+; GFX9-LABEL: v_min_max_v2i16_user:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v1, v0, s[8:9] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dword v2, v0, s[10:11] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT:    v_cmp_gt_i16_e32 vcc, v1, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v2, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_i16_e64 s[0:1], v3, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v4, v3, s[0:1]
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xffff, v5
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    v_lshl_or_b32 v5, v6, 16, v5
+; GFX9-NEXT:    v_lshl_or_b32 v1, v3, 16, v1
+; GFX9-NEXT:    v_lshlrev_b16_e32 v3, 1, v4
+; GFX9-NEXT:    global_store_dword v0, v5, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_store_dword v0, v1, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v0, v2, v3
+; GFX9-NEXT:    v_and_b32_e32 v0, 3, v0
+; GFX9-NEXT:    global_store_byte v[0:1], v0, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: v_min_max_v2i16_user:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[2:3], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v5, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_readfirstlane_b32 s0, v4
+; VI-NEXT:    v_readfirstlane_b32 s1, v5
+; VI-NEXT:    s_ashr_i32 s3, s0, 16
+; VI-NEXT:    s_ashr_i32 s5, s1, 16
+; VI-NEXT:    s_cmp_gt_i32 s3, s5
+; VI-NEXT:    s_sext_i32_i16 s2, s0
+; VI-NEXT:    s_sext_i32_i16 s4, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[0:1]
+; VI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; VI-NEXT:    s_cselect_b32 s0, s3, s5
+; VI-NEXT:    s_cselect_b32 s3, s5, s3
+; VI-NEXT:    s_lshl_b32 s5, s0, 16
+; VI-NEXT:    s_cmp_gt_i32 s2, s4
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s[0:1]
+; VI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; VI-NEXT:    s_cselect_b32 s0, s2, s4
+; VI-NEXT:    s_cselect_b32 s1, s4, s2
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    v_lshlrev_b16_e32 v4, 1, v4
+; VI-NEXT:    s_lshl_b32 s2, s3, 16
+; VI-NEXT:    s_and_b32 s1, s1, 0xffff
+; VI-NEXT:    s_or_b32 s0, s0, s5
+; VI-NEXT:    v_or_b32_e32 v4, v5, v4
+; VI-NEXT:    s_or_b32 s1, s1, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    v_and_b32_e32 v4, 3, v4
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    flat_store_dword v[0:1], v5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[2:3], v6
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_byte v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: v_min_max_v2i16_user:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx8 s[8:15], s[2:3], 0x9
+; CI-NEXT:    s_mov_b32 s7, 0xf000
+; CI-NEXT:    s_mov_b32 s6, -1
+; CI-NEXT:    s_mov_b32 s2, s6
+; CI-NEXT:    s_mov_b32 s3, s7
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s0, s12
+; CI-NEXT:    s_mov_b32 s1, s13
+; CI-NEXT:    s_mov_b32 s12, s14
+; CI-NEXT:    s_mov_b32 s13, s15
+; CI-NEXT:    s_mov_b32 s14, s6
+; CI-NEXT:    s_mov_b32 s15, s7
+; CI-NEXT:    buffer_load_sshort v0, off, s[0:3], 0 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v1, off, s[0:3], 0 offset:2 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v2, off, s[12:15], 0 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_load_sshort v3, off, s[12:15], 0 offset:2 glc
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_mov_b32 s4, s8
+; CI-NEXT:    s_mov_b32 s5, s9
+; CI-NEXT:    s_mov_b32 s12, s10
+; CI-NEXT:    s_mov_b32 s13, s11
+; CI-NEXT:    v_cmp_gt_i32_e32 vcc, v0, v2
+; CI-NEXT:    v_cmp_gt_i32_e64 s[0:1], v1, v3
+; CI-NEXT:    v_cndmask_b32_e32 v4, v2, v0, vcc
+; CI-NEXT:    v_cndmask_b32_e64 v5, v3, v1, s[0:1]
+; CI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; CI-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; CI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; CI-NEXT:    buffer_store_short v5, off, s[4:7], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v4, off, s[4:7], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v1, off, s[12:15], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_short v0, off, s[12:15], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_lshlrev_b32_e32 v0, 1, v2
+; CI-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; CI-NEXT:    v_or_b32_e32 v0, v1, v0
+; CI-NEXT:    v_and_b32_e32 v0, 3, v0
+; CI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_endpgm
   %val0 = load volatile <2 x i16>, ptr addrspace(1) %ptr0
   %val1 = load volatile <2 x i16>, ptr addrspace(1) %ptr1
 
@@ -219,10 +999,85 @@ define amdgpu_kernel void @v_min_max_v2i16_user(ptr addrspace(1) %out0, ptr addr
   ret void
 }
 
-; GCN-LABEL: {{^}}u_min_max_v2i16:
-; GFX9: v_pk_max_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX9: v_pk_min_u16 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @u_min_max_v2i16(ptr addrspace(1) %out0, ptr addrspace(1) %out1, <2 x i16> %val0, <2 x i16> %val1) nounwind {
+; GFX9-LABEL: u_min_max_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_pk_max_u16 v2, s0, v1
+; GFX9-NEXT:    v_pk_min_u16 v1, s0, v1
+; GFX9-NEXT:    global_store_dword v0, v2, s[4:5]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_store_dword v0, v1, s[6:7]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_endpgm
+;
+; VI-LABEL: u_min_max_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_lshr_b32 s2, s0, 16
+; VI-NEXT:    s_lshr_b32 s3, s1, 16
+; VI-NEXT:    s_and_b32 s0, s0, 0xffff
+; VI-NEXT:    s_and_b32 s1, s1, 0xffff
+; VI-NEXT:    s_max_u32 s5, s2, s3
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_max_u32 s4, s0, s1
+; VI-NEXT:    s_lshl_b32 s5, s5, 16
+; VI-NEXT:    s_min_u32 s0, s0, s1
+; VI-NEXT:    s_min_u32 s1, s2, s3
+; VI-NEXT:    s_or_b32 s4, s4, s5
+; VI-NEXT:    s_lshl_b32 s1, s1, 16
+; VI-NEXT:    s_or_b32 s0, s0, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    flat_store_dword v[2:3], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+;
+; CI-LABEL: u_min_max_v2i16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x9
+; CI-NEXT:    s_load_dwordx2 s[12:13], s[2:3], 0xd
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s4
+; CI-NEXT:    s_mov_b32 s0, s6
+; CI-NEXT:    s_lshr_b32 s4, s12, 16
+; CI-NEXT:    s_lshr_b32 s6, s13, 16
+; CI-NEXT:    s_mov_b32 s9, s5
+; CI-NEXT:    s_mov_b32 s1, s7
+; CI-NEXT:    s_and_b32 s5, s12, 0xffff
+; CI-NEXT:    s_and_b32 s7, s13, 0xffff
+; CI-NEXT:    s_max_u32 s13, s4, s6
+; CI-NEXT:    s_max_u32 s12, s5, s7
+; CI-NEXT:    v_mov_b32_e32 v0, s13
+; CI-NEXT:    s_min_u32 s4, s4, s6
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s12
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    s_min_u32 s5, s5, s7
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s4
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:2
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v0, s5
+; CI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    s_endpgm
   %cond0 = icmp ugt <2 x i16> %val0, %val1
   %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
   %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
