[llvm] r367175 - [AMDGPU] Regenerate tests.

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jul 27 07:32:23 PDT 2019


Author: rksimon
Date: Sat Jul 27 07:32:23 2019
New Revision: 367175

URL: http://llvm.org/viewvc/llvm-project?rev=367175&view=rev
Log:
[AMDGPU] Regenerate tests.

To help show the diffs from an upcoming SimplifyDemandedBits patch.

Modified:
    llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll
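
These checks were autogenerated with utils/update_llc_test_checks.py (see the NOTE line added at the top of each test below). For reference, a typical invocation, assuming an in-tree build whose llc binary sits in build/bin, looks like:

  $ llvm/trunk/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll \
      llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll \
      llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll

The script runs each test's RUN-line llc command and rewrites the CHECK lines in place, one block per check prefix, which is why the hand-written GCN patterns below are replaced with exhaustive per-prefix (SI/VI) assertions.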

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll?rev=367175&r1=367174&r2=367175&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll Sat Jul 27 07:32:23 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -mattr=-flat-for-global,+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,GCN-NO-TONGA %s
 ; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GCN-TONGA %s
 
@@ -8,246 +9,981 @@
 
 ; FIXME: Why is the constant moved into the intermediate register and
 ; not just directly into the vector component?
-
-; GCN-LABEL: {{^}}insertelement_v4f32_0:
-; GCN: s_load_dwordx4
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_mov_b32 [[CONSTREG:s[0-9]+]], 0x40a00000
-; GCN-DAG: v_mov_b32_e32 v[[LOW_REG:[0-9]+]], [[CONSTREG]]
-; GCN: buffer_store_dwordx4 v{{\[}}[[LOW_REG]]:
 define amdgpu_kernel void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+; SI-LABEL: insertelement_v4f32_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s8, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v4f32_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s8, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
   store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v4f32_1:
 define amdgpu_kernel void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+; SI-LABEL: insertelement_v4f32_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s8, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s8
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v4f32_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s8, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s8
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
   store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v4f32_2:
 define amdgpu_kernel void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+; SI-LABEL: insertelement_v4f32_2:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s8, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v4f32_2:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s8, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v2, s8
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
   store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v4f32_3:
 define amdgpu_kernel void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
+; SI-LABEL: insertelement_v4f32_3:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s8, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s8
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v4f32_3:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s8, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s8
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
   store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v4i32_0:
 define amdgpu_kernel void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
+; SI-LABEL: insertelement_v4i32_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_movk_i32 s4, 0x3e7
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v4i32_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_movk_i32 s4, 0x3e7
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i32> %a, i32 999, i32 0
   store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v3f32_1:
 define amdgpu_kernel void @insertelement_v3f32_1(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+; SI-LABEL: insertelement_v3f32_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v1, 0x40a00000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v3f32_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    v_mov_b32_e32 v1, 0x40a00000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 1
   store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v3f32_2:
 define amdgpu_kernel void @insertelement_v3f32_2(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+; SI-LABEL: insertelement_v3f32_2:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insertelement_v3f32_2:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 2
   store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_v3f32_3:
 define amdgpu_kernel void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+; GCN-LABEL: insertelement_v3f32_3:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 3
   store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}insertelement_to_sgpr:
-; GCN-NOT: v_readfirstlane
 define <4 x float> @insertelement_to_sgpr() nounwind {
+; GCN-LABEL: insertelement_to_sgpr:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[12:15], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s12, 0
+; GCN-NEXT:    s_mov_b32 s4, s12
+; GCN-NEXT:    s_mov_b32 s5, s12
+; GCN-NEXT:    s_mov_b32 s6, s12
+; GCN-NEXT:    s_mov_b32 s7, s12
+; GCN-NEXT:    s_mov_b32 s8, s12
+; GCN-NEXT:    s_mov_b32 s9, s12
+; GCN-NEXT:    s_mov_b32 s10, s12
+; GCN-NEXT:    s_mov_b32 s11, s12
+; GCN-NEXT:    image_gather4_lz v[0:3], v[0:1], s[4:11], s[12:15] dmask:0x1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %tmp = load <4 x i32>, <4 x i32> addrspace(4)* undef
   %tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 0
   %tmp2 = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32(i32 1, float undef, float undef, <8 x i32> undef, <4 x i32> %tmp1, i1 0, i32 0, i32 0)
   ret <4 x float> %tmp2
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2f32:
-; GCN-DAG: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX:s[0-9]+]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]], v{{[0-9]+}}, [[CC1]]
-; GCN: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
 define amdgpu_kernel void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x2
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x4
+; SI-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v2, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x8
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, v2, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
   store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v3f32:
-; GCN-DAG: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX:s[0-9]+]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: buffer_store_dwordx3 v
 define amdgpu_kernel void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v3f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x8
+; SI-NEXT:    v_mov_b32_e32 v3, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v3f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; VI-NEXT:    v_mov_b32_e32 v3, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 %b
   store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v4f32:
-; GCN-DAG: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX:s[0-9]+]], 3
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC4]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]], v{{[0-9]+}}, [[CC1]]
-; GCN: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]:
 define amdgpu_kernel void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v4f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x8
+; SI-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; SI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v4f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; VI-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; VI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
   store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v8f32:
-; GCN-DAG: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; GCN-DAG: v_cmp_ne_u32_e64 [[CCL:[^,]+]], [[IDX:s[0-9]+]], 7
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CCL]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, [[CONST]], v{{[0-9]+}}, [[CC1]]
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v8f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x8
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; SI-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; SI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s14
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; SI-NEXT:    v_cndmask_b32_e32 v6, v4, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; SI-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v8, s12
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v8f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x20
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x40
+; VI-NEXT:    v_mov_b32_e32 v4, 0x40a00000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; VI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; VI-NEXT:    v_cndmask_b32_e32 v7, v4, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s14
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; VI-NEXT:    v_cndmask_b32_e32 v6, v4, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; VI-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v8, s12
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
   store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v16f32:
-; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v16f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x10
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v16, 0x40a00000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_mov_b32_e32 v6, s14
+; SI-NEXT:    v_mov_b32_e32 v7, s15
+; SI-NEXT:    v_mov_b32_e32 v8, s16
+; SI-NEXT:    v_mov_b32_e32 v9, s17
+; SI-NEXT:    v_mov_b32_e32 v10, s18
+; SI-NEXT:    v_mov_b32_e32 v11, s19
+; SI-NEXT:    v_mov_b32_e32 v12, s20
+; SI-NEXT:    v_mov_b32_e32 v13, s21
+; SI-NEXT:    v_mov_b32_e32 v14, s22
+; SI-NEXT:    v_mov_b32_e32 v15, s23
+; SI-NEXT:    s_mov_b32 m0, s4
+; SI-NEXT:    v_movreld_b32_e32 v0, v16
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v16f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x40
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x80
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    v_mov_b32_e32 v16, 0x40a00000
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s11
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_mov_b32_e32 v6, s14
+; VI-NEXT:    v_mov_b32_e32 v7, s15
+; VI-NEXT:    v_mov_b32_e32 v8, s16
+; VI-NEXT:    v_mov_b32_e32 v9, s17
+; VI-NEXT:    v_mov_b32_e32 v10, s18
+; VI-NEXT:    v_mov_b32_e32 v11, s19
+; VI-NEXT:    v_mov_b32_e32 v12, s20
+; VI-NEXT:    v_mov_b32_e32 v13, s21
+; VI-NEXT:    v_mov_b32_e32 v14, s22
+; VI-NEXT:    v_mov_b32_e32 v15, s23
+; VI-NEXT:    s_mov_b32 m0, s4
+; VI-NEXT:    v_movreld_b32_e32 v0, v16
+; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
   store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2i32:
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX:s[0-9]+]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], 5, v{{[0-9]+}}, [[CC1]]
-; GCN: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
 define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x2
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x8
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s7
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i32> %a, i32 5, i32 %b
   store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v3i32:
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX:s[0-9]+]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: buffer_store_dwordx3 v
 define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v3i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x8
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v3i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i32> %a, i32 5, i32 %b
   store <3 x i32> %vecins, <3 x i32> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v4i32:
-; GCN: s_load_dword [[SVAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x11|0x44}}
-; GCN-DAG: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC4:[^,]+]], [[IDX:s[0-9]+]], 3
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[VVAL]], [[CC4]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC3:[^,]+]], [[IDX]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}},  v{{[0-9]+}}, [[VVAL]], [[CC3]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[VVAL]], [[CC2]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[VVAL]], [[CC1]]
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b, [8 x i32], i32 %val) nounwind {
+; SI-LABEL: dynamic_insertelement_v4i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x8
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x11
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
+; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v4i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x20
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x44
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
+; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i32> %a, i32 %val, i32 %b
   store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v8i32:
-; GCN-DAG: v_cmp_ne_u32_e64 [[CCL:[^,]+]], [[IDX:s[0-9]+]], 7
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CCL]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC1]]
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v8i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x8
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; SI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s15
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; SI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s14
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; SI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s13
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v8i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x20
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x40
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; VI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s15
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; VI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s14
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; VI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s13
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; VI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x i32> %a, i32 5, i32 %b
   store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v16i32:
-; GCN: v_movreld_b32
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v16i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x10
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_mov_b32_e32 v6, s14
+; SI-NEXT:    v_mov_b32_e32 v7, s15
+; SI-NEXT:    v_mov_b32_e32 v8, s16
+; SI-NEXT:    v_mov_b32_e32 v9, s17
+; SI-NEXT:    v_mov_b32_e32 v10, s18
+; SI-NEXT:    v_mov_b32_e32 v11, s19
+; SI-NEXT:    v_mov_b32_e32 v12, s20
+; SI-NEXT:    v_mov_b32_e32 v13, s21
+; SI-NEXT:    v_mov_b32_e32 v14, s22
+; SI-NEXT:    v_mov_b32_e32 v15, s23
+; SI-NEXT:    s_mov_b32 m0, s4
+; SI-NEXT:    v_movreld_b32_e32 v0, 5
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v16i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x40
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x80
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s11
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_mov_b32_e32 v6, s14
+; VI-NEXT:    v_mov_b32_e32 v7, s15
+; VI-NEXT:    v_mov_b32_e32 v8, s16
+; VI-NEXT:    v_mov_b32_e32 v9, s17
+; VI-NEXT:    v_mov_b32_e32 v10, s18
+; VI-NEXT:    v_mov_b32_e32 v11, s19
+; VI-NEXT:    v_mov_b32_e32 v12, s20
+; VI-NEXT:    v_mov_b32_e32 v13, s21
+; VI-NEXT:    v_mov_b32_e32 v14, s22
+; VI-NEXT:    v_mov_b32_e32 v15, s23
+; VI-NEXT:    s_mov_b32 m0, s4
+; VI-NEXT:    v_movreld_b32_e32 v0, 5
+; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x i32> %a, i32 5, i32 %b
   store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2i16:
 define amdgpu_kernel void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x2
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x3
+; SI-NEXT:    v_mov_b32_e32 v0, 0x50005
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_lshl_b32 s4, s4, 4
+; SI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x8
+; VI-NEXT:    s_load_dword s4, s[4:5], 0xc
+; VI-NEXT:    v_mov_b32_e32 v0, 0x50005
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s6
+; VI-NEXT:    s_lshl_b32 s4, s4, 4
+; VI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; VI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i16> %a, i16 5, i32 %b
   store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v3i16:
 define amdgpu_kernel void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v3i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x2
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s5, 0
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshl_b32 s8, s4, 4
+; SI-NEXT:    s_mov_b32 s4, 0xffff
+; SI-NEXT:    s_lshl_b64 s[4:5], s[4:5], s8
+; SI-NEXT:    s_mov_b32 s8, 0x50005
+; SI-NEXT:    s_and_b32 s9, s5, s8
+; SI-NEXT:    s_and_b32 s8, s4, s8
+; SI-NEXT:    s_andn2_b64 s[4:5], s[6:7], s[4:5]
+; SI-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:4
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v3i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x8
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s5, 0
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s7
+; VI-NEXT:    s_lshl_b32 s8, s4, 4
+; VI-NEXT:    s_mov_b32 s4, 0xffff
+; VI-NEXT:    s_lshl_b64 s[4:5], s[4:5], s8
+; VI-NEXT:    s_mov_b32 s8, 0x50005
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_bfi_b32 v0, s5, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, s8
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0 offset:4
+; VI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i16> %a, i16 5, i32 %b
   store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2i8:
-; VI: s_load_dword [[LOAD:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x28
-; VI-NEXT: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
-; VI-NOT: _load
-; VI: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: v_lshlrev_b16_e64 [[MASK:v[0-9]+]], [[SCALED_IDX]], -1
-; VI: v_and_b32_e32 [[INSERT:v[0-9]+]], 0x505, [[MASK]]
-; VI: v_xor_b32_e32 [[NOT_MASK:v[0-9]+]], -1, [[MASK]]
-; VI: v_and_b32_e32 [[AND_NOT_MASK:v[0-9]+]], [[LOAD]], [[NOT_MASK]]
-; VI: v_or_b32_e32 [[OR:v[0-9]+]], [[INSERT]], [[AND_NOT_MASK]]
-; VI: buffer_store_short [[OR]]
 define amdgpu_kernel void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, [8 x i32], <2 x i8> %a, [8 x i32], i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s6, s[4:5], 0xa
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x13
+; SI-NEXT:    v_mov_b32_e32 v0, 0x505
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_lshl_b32 s4, s4, 3
+; SI-NEXT:    s_lshl_b32 s4, -1, s4
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x28
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x4c
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshl_b32 s4, s4, 3
+; VI-NEXT:    v_lshlrev_b16_e64 v0, s4, -1
+; VI-NEXT:    v_and_b32_e32 v1, 0x505, v0
+; VI-NEXT:    v_xor_b32_e32 v0, -1, v0
+; VI-NEXT:    v_and_b32_e32 v0, s6, v0
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i8> %a, i8 5, i32 %b
   store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
   ret void
@@ -255,83 +991,340 @@ define amdgpu_kernel void @dynamic_inser
 
 ; FIXME: post legalize i16 and i32 shifts aren't merged because of
 ; isTypeDesirableForOp in SimplifyDemandedBits
-
-; GCN-LABEL: {{^}}dynamic_insertelement_v3i8:
-; VI: s_load_dword [[LOAD:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x28
-; VI-NEXT: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
-; VI-NOT: _load
-
-; VI: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x5050505
-; VI: v_mov_b32_e32 [[V_LOAD:v[0-9]+]], [[LOAD]]
-; VI: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: s_lshl_b32 [[SHIFTED_MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
-; VI: v_bfi_b32 [[BFI:v[0-9]+]], [[SHIFTED_MASK]], [[VAL]], [[V_LOAD]]
-; VI: v_lshrrev_b32_e32 [[V_HI2:v[0-9]+]], 16, [[BFI]]
-
-; VI: buffer_store_short [[BFI]]
-; VI: buffer_store_byte [[V_HI2]]
 define amdgpu_kernel void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, [8 x i32], <3 x i8> %a, [8 x i32], i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v3i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s6, s[4:5], 0xa
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x13
+; SI-NEXT:    v_mov_b32_e32 v0, 0x5050505
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_lshl_b32 s4, s4, 3
+; SI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_byte v1, off, s[0:3], 0 offset:2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v3i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x28
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x4c
+; VI-NEXT:    v_mov_b32_e32 v0, 0x5050505
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s6
+; VI-NEXT:    s_lshl_b32 s4, s4, 3
+; VI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; VI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_byte v1, off, s[0:3], 0 offset:2
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i8> %a, i8 5, i32 %b
   store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v4i8:
-; VI: s_load_dword [[LOAD:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x28
-; VI-NEXT: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x4c
-; VI-NOT: _load
-
-; VI: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x5050505
-; VI: v_mov_b32_e32 [[V_LOAD:v[0-9]+]], [[LOAD]]
-; VI-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI: s_lshl_b32 [[SHIFTED_MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
-; VI: v_bfi_b32 [[BFI:v[0-9]+]], [[SHIFTED_MASK]], [[VAL]], [[V_LOAD]]
-; VI: buffer_store_dword [[BFI]]
 define amdgpu_kernel void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, [8 x i32], <4 x i8> %a, [8 x i32], i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v4i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s6, s[4:5], 0xa
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x13
+; SI-NEXT:    v_mov_b32_e32 v0, 0x5050505
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    s_lshl_b32 s4, s4, 3
+; SI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v4i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x28
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x4c
+; VI-NEXT:    v_mov_b32_e32 v0, 0x5050505
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s6
+; VI-NEXT:    s_lshl_b32 s4, s4, 3
+; VI-NEXT:    s_lshl_b32 s4, 0xffff, s4
+; VI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i8> %a, i8 5, i32 %b
   store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 4
   ret void
 }
 
-; GCN-LABEL: {{^}}s_dynamic_insertelement_v8i8:
-; VI-NOT: {{buffer|flat|global}}_load
-; VI-DAG: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
-; VI-DAG: s_load_dword [[IDX:s[0-9]]], s[4:5], 0x10
-; VI-DAG: s_mov_b32 s[[MASK_HI:[0-9]+]], 0{{$}}
-; VI-DAG: s_load_dwordx2 [[VEC:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0x0
-
-; VI-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 3
-; VI-DAG: s_mov_b32 s[[MASK_LO:[0-9]+]], 0xffff
-; VI: s_lshl_b64 s{{\[}}[[MASK_SHIFT_LO:[0-9]+]]:[[MASK_SHIFT_HI:[0-9]+]]{{\]}}, s{{\[}}[[MASK_LO]]:[[MASK_HI]]{{\]}}, [[SCALED_IDX]]
-; VI: s_mov_b32 [[VAL:s[0-9]+]], 0x5050505
-; VI: s_and_b32 s[[INS_HI:[0-9]+]], s[[MASK_SHIFT_HI]], [[VAL]]
-; VI: s_and_b32 s[[INS_LO:[0-9]+]], s[[MASK_SHIFT_LO]], [[VAL]]
-; VI: s_andn2_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[VEC]], s{{\[}}[[MASK_SHIFT_LO]]:[[MASK_SHIFT_HI]]{{\]}}
-; VI: s_or_b64 s{{\[}}[[RESULT0:[0-9]+]]:[[RESULT1:[0-9]+]]{{\]}}, s{{\[}}[[INS_LO]]:[[INS_HI]]{{\]}}, [[AND]]
-; VI: v_mov_b32_e32 v[[V_RESULT0:[0-9]+]], s[[RESULT0]]
-; VI: v_mov_b32_e32 v[[V_RESULT1:[0-9]+]], s[[RESULT1]]
-; VI: buffer_store_dwordx2 v{{\[}}[[V_RESULT0]]:[[V_RESULT1]]{{\]}}
 define amdgpu_kernel void @s_dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(4)* %a.ptr, i32 %b) nounwind {
+; SI-LABEL: s_dynamic_insertelement_v8i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[10:11], 0x0
+; SI-NEXT:    s_mov_b32 s0, s8
+; SI-NEXT:    s_lshl_b32 s8, s6, 3
+; SI-NEXT:    s_mov_b32 s6, 0xffff
+; SI-NEXT:    s_lshl_b64 s[6:7], s[6:7], s8
+; SI-NEXT:    s_mov_b32 s8, 0x5050505
+; SI-NEXT:    s_mov_b32 s1, s9
+; SI-NEXT:    s_and_b32 s9, s7, s8
+; SI-NEXT:    s_and_b32 s8, s6, s8
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; SI-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_dynamic_insertelement_v8i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x10
+; VI-NEXT:    s_mov_b32 s7, 0
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[10:11], 0x0
+; VI-NEXT:    s_mov_b32 s0, s8
+; VI-NEXT:    s_lshl_b32 s8, s6, 3
+; VI-NEXT:    s_mov_b32 s6, 0xffff
+; VI-NEXT:    s_lshl_b64 s[6:7], s[6:7], s8
+; VI-NEXT:    s_mov_b32 s8, 0x5050505
+; VI-NEXT:    s_mov_b32 s1, s9
+; VI-NEXT:    s_and_b32 s9, s7, s8
+; VI-NEXT:    s_and_b32 s8, s6, s8
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; VI-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %a = load <8 x i8>, <8 x i8> addrspace(4)* %a.ptr, align 4
   %vecins = insertelement <8 x i8> %a, i8 5, i32 %b
   store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v16i8:
-; GCN: s_load_dwordx2
-; GCN: s_load_dwordx4
-; GCN: s_load_dword s
-
-; GCN-NOT: buffer_store_byte
-
-; GCN-DAG: v_cmp_ne_u32_e64 [[CCL:[^,]+]], [[IDX:s[0-9]+]], 15
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CCL]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}, [[CC1]]
-
-; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v16i8:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x8
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_lshr_b32 s5, s11, 24
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 15
+; SI-NEXT:    s_lshr_b32 s5, s11, 16
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 14
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    s_movk_i32 s5, 0xff
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s6, s11, 8
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 13
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s11
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 12
+; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; SI-NEXT:    v_and_b32_e32 v2, s5, v2
+; SI-NEXT:    v_or_b32_e32 v1, v2, v1
+; SI-NEXT:    s_mov_b32 s6, 0xffff
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s10, 24
+; SI-NEXT:    v_or_b32_e32 v3, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 11
+; SI-NEXT:    s_lshr_b32 s7, s10, 16
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 10
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s7, s10, 8
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 9
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 8
+; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; SI-NEXT:    v_and_b32_e32 v2, s5, v2
+; SI-NEXT:    v_or_b32_e32 v1, v2, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s9, 24
+; SI-NEXT:    v_or_b32_e32 v2, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; SI-NEXT:    s_lshr_b32 s7, s9, 16
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s7, s9, 8
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; SI-NEXT:    v_and_b32_e32 v4, s5, v4
+; SI-NEXT:    v_or_b32_e32 v1, v4, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s8, 24
+; SI-NEXT:    v_or_b32_e32 v1, v1, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; SI-NEXT:    s_lshr_b32 s7, s8, 16
+; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; SI-NEXT:    v_and_b32_e32 v4, s5, v4
+; SI-NEXT:    s_lshr_b32 s7, s8, 8
+; SI-NEXT:    v_or_b32_e32 v0, v4, v0
+; SI-NEXT:    v_mov_b32_e32 v4, s7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v5, vcc
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
+; SI-NEXT:    v_and_b32_e32 v5, s5, v5
+; SI-NEXT:    v_or_b32_e32 v4, v5, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_and_b32_e32 v4, s6, v4
+; SI-NEXT:    v_or_b32_e32 v0, v4, v0
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v16i8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_lshr_b32 s5, s11, 24
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 15
+; VI-NEXT:    s_lshr_b32 s5, s11, 16
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 14
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s11, 8
+; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 13
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s11
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 12
+; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    s_lshr_b32 s5, s10, 24
+; VI-NEXT:    v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 11
+; VI-NEXT:    s_lshr_b32 s5, s10, 16
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 10
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s10, 8
+; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 9
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 8
+; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    s_lshr_b32 s5, s9, 24
+; VI-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
+; VI-NEXT:    s_lshr_b32 s5, s9, 16
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s9, 8
+; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
+; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s9
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
+; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    s_lshr_b32 s5, s8, 24
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
+; VI-NEXT:    s_lshr_b32 s5, s8, 16
+; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s8, 8
+; VI-NEXT:    v_or_b32_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    v_mov_b32_e32 v4, s5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_lshlrev_b16_e32 v4, 8, v4
+; VI-NEXT:    v_cndmask_b32_e32 v5, 5, v5, vcc
+; VI-NEXT:    v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x i8> %a, i8 5, i32 %b
   store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
   ret void
@@ -339,8 +1332,48 @@ define amdgpu_kernel void @dynamic_inser
 
 ; This test requires handling INSERT_SUBREG in SIFixSGPRCopies.  Check that
 ; the compiler doesn't crash.
-; GCN-LABEL: {{^}}insert_split_bb:
 define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
+; SI-LABEL: insert_split_bb:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dword s0, s[4:5], 0x4
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    s_cbranch_scc0 BB26_2
+; SI-NEXT:  ; %bb.1: ; %else
+; SI-NEXT:    s_load_dword s1, s[6:7], 0x1
+; SI-NEXT:    s_branch BB26_3
+; SI-NEXT:  BB26_2: ; %if
+; SI-NEXT:    s_load_dword s1, s[6:7], 0x0
+; SI-NEXT:  BB26_3: ; %endif
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: insert_split_bb:
+; VI:       ; %bb.0: ; %entry
+; VI-NEXT:    s_load_dword s0, s[4:5], 0x10
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    s_cbranch_scc0 BB26_2
+; VI-NEXT:  ; %bb.1: ; %else
+; VI-NEXT:    s_load_dword s1, s[6:7], 0x4
+; VI-NEXT:    s_branch BB26_3
+; VI-NEXT:  BB26_2: ; %if
+; VI-NEXT:    s_load_dword s1, s[6:7], 0x0
+; VI-NEXT:  BB26_3: ; %endif
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_mov_b32 s7, 0x1100f000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT:    s_endpgm
 entry:
   %0 = insertelement <2 x i32> undef, i32 %a, i32 0
   %1 = icmp eq i32 %a, 0
@@ -363,111 +1396,324 @@ endif:
   ret void
 }
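The context elision above hides the branchy middle of this test. The shape that used to trip SIFixSGPRCopies is an insertelement whose second lane is filled on two different paths and merged by a phi; a sketch of that pattern, with guessed block and value names (not the test's verbatim body):

define amdgpu_kernel void @insert_split_bb_sketch(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a) {
entry:
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0  ; lowers to INSERT_SUBREG
  %cond = icmp eq i32 %a, 0
  br i1 %cond, label %if, label %else
if:
  %x = load i32, i32 addrspace(1)* %in
  %vif = insertelement <2 x i32> %v0, i32 %x, i32 1
  br label %endif
else:
  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %y = load i32, i32 addrspace(1)* %gep
  %velse = insertelement <2 x i32> %v0, i32 %y, i32 1
  br label %endif
endif:
  %v = phi <2 x i32> [ %vif, %if ], [ %velse, %else ]
  store <2 x i32> %v, <2 x i32> addrspace(1)* %out
  ret void
}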
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2f64:
-; GCN-DAG: s_load_dwordx4 s{{\[}}[[A_ELT0:[0-9]+]]:[[A_ELT3:[0-9]+]]{{\]}}
-; GCN-DAG: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x18|0x60}}{{$}}
-
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0x40200000
-
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX:s[0-9]+]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ELT1]], [[CC2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[ELT1]], [[CC1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
-
-; GCN: buffer_store_dwordx4
-; GCN: s_endpgm
 define amdgpu_kernel void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, [8 x i32], <2 x double> %a, [8 x i32], i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0xc
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x18
+; SI-NEXT:    v_mov_b32_e32 v1, 0x40200000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x30
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x60
+; VI-NEXT:    v_mov_b32_e32 v1, 0x40200000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x double> %a, double 8.0, i32 %b
   store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v2i64:
-
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX:s[0-9]+]], 1
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 5, [[CC2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 5, [[CC1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
-
-; GCN: buffer_store_dwordx4
-; GCN: s_endpgm
 define amdgpu_kernel void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v2i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x8
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
+; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v2i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x10
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x20
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
+; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i64> %a, i64 5, i32 %b
   store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v3i64:
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC3:[^,]+]], [[IDX:s[0-9]+]], 2
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}},  v{{[0-9]+}}, 5, [[CC3]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}},  v{{[0-9]+}}, 0, [[CC3]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 5, [[CC2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 5, [[CC1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
 define amdgpu_kernel void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v3i64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x8
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x10
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s13
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 2
+; SI-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s12
+; SI-NEXT:    v_cndmask_b32_e64 v4, v0, 5, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
+; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
+; SI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v3i64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x20
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x40
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s13
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 2
+; VI-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s12
+; VI-NEXT:    v_cndmask_b32_e64 v4, v0, 5, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
+; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
+; VI-NEXT:    buffer_store_dwordx2 v[4:5], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i64> %a, i64 5, i32 %b
   store <3 x i64> %vecins, <3 x i64> addrspace(1)* %out, align 32
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v4f64:
-
-; GCN-DAG: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40200000
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC4:[^,]+]], [[IDX:s[0-9]+]], 3
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[CONST]], [[CC4]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC4]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC3:[^,]+]], [[IDX]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}},  v{{[0-9]+}}, [[CONST]], [[CC3]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}},  v{{[0-9]+}}, 0, [[CC3]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[CONST]], [[CC2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
-; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[CONST]], [[CC1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
-
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: s_endpgm
-; GCN: ScratchSize: 0
-
 define amdgpu_kernel void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
+; SI-LABEL: dynamic_insertelement_v4f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x8
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; SI-NEXT:    v_mov_b32_e32 v4, 0x40200000
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
+; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s10
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 3
+; SI-NEXT:    v_cndmask_b32_e32 v7, v5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s14
+; SI-NEXT:    v_cndmask_b32_e64 v6, v5, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 2
+; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v4f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x20
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x40
+; VI-NEXT:    v_mov_b32_e32 v4, 0x40200000
+; VI-NEXT:    s_mov_b32 s3, 0x1100f000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
+; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s10
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 3
+; VI-NEXT:    v_cndmask_b32_e32 v7, v5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s14
+; VI-NEXT:    v_cndmask_b32_e64 v6, v5, 0, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 2
+; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v4, vcc
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x double> %a, double 8.0, i32 %b
   store <4 x double> %vecins, <4 x double> addrspace(1)* %out, align 16
   ret void
 }
 
-; GCN-LABEL: {{^}}dynamic_insertelement_v8f64:
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:64{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:80{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:96{{$}}
-; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:112{{$}}
-
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s[0:3], {{s[0-9]+}} offen{{$}}
-
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:64{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:80{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:96{{$}}
-; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[0:3], {{s[0-9]+}} offset:112{{$}}
-
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: buffer_store_dwordx4
-; GCN: s_endpgm
-; GCN: ScratchSize: 128
 define amdgpu_kernel void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) #0 {
+; SI-LABEL: dynamic_insertelement_v8f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[24:25], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x10
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x20
+; SI-NEXT:    v_mov_b32_e32 v16, 64
+; SI-NEXT:    s_mov_b32 s27, 0x100f000
+; SI-NEXT:    s_mov_b32 s26, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    s_and_b32 s4, s4, 7
+; SI-NEXT:    s_lshl_b32 s4, s4, 3
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s11
+; SI-NEXT:    v_mov_b32_e32 v4, s12
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_mov_b32_e32 v6, s14
+; SI-NEXT:    v_mov_b32_e32 v7, s15
+; SI-NEXT:    v_mov_b32_e32 v8, s16
+; SI-NEXT:    v_mov_b32_e32 v9, s17
+; SI-NEXT:    v_mov_b32_e32 v10, s18
+; SI-NEXT:    v_mov_b32_e32 v11, s19
+; SI-NEXT:    v_mov_b32_e32 v12, s20
+; SI-NEXT:    v_mov_b32_e32 v13, s21
+; SI-NEXT:    v_mov_b32_e32 v14, s22
+; SI-NEXT:    v_mov_b32_e32 v15, s23
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], s7 offset:112
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], s7 offset:96
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], s7 offset:80
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], s7 offset:64
+; SI-NEXT:    v_or_b32_e32 v16, s4, v16
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    v_mov_b32_e32 v1, 0x40200000
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], v16, s[0:3], s7 offen
+; SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], s7 offset:64
+; SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], s7 offset:80
+; SI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[0:3], s7 offset:96
+; SI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[0:3], s7 offset:112
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[24:27], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[24:27], 0 offset:32
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[24:27], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[24:27], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: dynamic_insertelement_v8f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[24:25], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x40
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x80
+; VI-NEXT:    v_mov_b32_e32 v16, 64
+; VI-NEXT:    s_mov_b32 s27, 0x1100f000
+; VI-NEXT:    s_mov_b32 s26, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    s_and_b32 s4, s4, 7
+; VI-NEXT:    s_lshl_b32 s4, s4, 3
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s11
+; VI-NEXT:    v_mov_b32_e32 v4, s12
+; VI-NEXT:    v_mov_b32_e32 v5, s13
+; VI-NEXT:    v_mov_b32_e32 v6, s14
+; VI-NEXT:    v_mov_b32_e32 v7, s15
+; VI-NEXT:    v_mov_b32_e32 v8, s16
+; VI-NEXT:    v_mov_b32_e32 v9, s17
+; VI-NEXT:    v_mov_b32_e32 v10, s18
+; VI-NEXT:    v_mov_b32_e32 v11, s19
+; VI-NEXT:    v_mov_b32_e32 v12, s20
+; VI-NEXT:    v_mov_b32_e32 v13, s21
+; VI-NEXT:    v_mov_b32_e32 v14, s22
+; VI-NEXT:    v_mov_b32_e32 v15, s23
+; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], s7 offset:112
+; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], s7 offset:96
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], s7 offset:80
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], s7 offset:64
+; VI-NEXT:    v_or_b32_e32 v16, s4, v16
+; VI-NEXT:    v_mov_b32_e32 v0, 0
+; VI-NEXT:    v_mov_b32_e32 v1, 0x40200000
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], v16, s[0:3], s7 offen
+; VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], s7 offset:64
+; VI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], s7 offset:80
+; VI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[0:3], s7 offset:96
+; VI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[0:3], s7 offset:112
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[24:27], 0 offset:48
+; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[24:27], 0 offset:32
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[24:27], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[24:27], 0
+; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x double> %a, double 8.0, i32 %b
   store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
   ret void

Modified: llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll?rev=367175&r1=367174&r2=367175&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll Sat Jul 27 07:32:23 2019
@@ -1,15 +1,50 @@
-; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -amdgpu-scalarize-global-loads=false  -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,SI
+; RUN: llc < %s -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,VI
 
 ; XXX - Why the packing?
-; GCN-LABEL: {{^}}scalar_to_vector_v2i32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
-; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
-; GCN: v_or_b32_e32 v[[OR:[0-9]+]], [[SHR]], [[SHL]]
-; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[OR]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[OR]]:[[COPY]]{{\]}}
 define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+; SI-LABEL: scalar_to_vector_v2i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s10, s6
+; SI-NEXT:    s_mov_b32 s11, s7
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s8, s2
+; SI-NEXT:    s_mov_b32 s9, s3
+; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_mov_b32_e32 v1, v0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: scalar_to_vector_v2i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
+; VI-NEXT:    s_mov_b32 s6, s2
+; VI-NEXT:    s_mov_b32 s7, s3
+; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %tmp1 = load i32, i32 addrspace(1)* %in, align 4
   %bc = bitcast i32 %tmp1 to <2 x i16>
   %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -17,11 +52,48 @@ define amdgpu_kernel void @scalar_to_vec
   ret void
 }
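On the XXX above: the shufflevector broadcasts element 1 of the <2 x i16> -- the high half of the loaded dword -- into all four lanes, so each output dword is hi | (hi << 16). That is exactly the lshr/lshl/or sequence in both check blocks. A scalar restatement, purely illustrative:

define i32 @broadcast_hi16(i32 %x) {
  %hi = lshr i32 %x, 16       ; element 1 of the bitcast <2 x i16>
  %shl = shl i32 %hi, 16
  %packed = or i32 %hi, %shl  ; hi16 duplicated into both halves
  ret i32 %packed
}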
 
-; GCN-LABEL: {{^}}scalar_to_vector_v2f32:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]],
-; GCN: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
-; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+; SI-LABEL: scalar_to_vector_v2f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s10, s6
+; SI-NEXT:    s_mov_b32 s11, s7
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s8, s2
+; SI-NEXT:    s_mov_b32 s9, s3
+; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_mov_b32_e32 v1, v0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: scalar_to_vector_v2f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s0, s4
+; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s4, s6
+; VI-NEXT:    s_mov_b32 s5, s7
+; VI-NEXT:    s_mov_b32 s6, s2
+; VI-NEXT:    s_mov_b32 s7, s3
+; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
   %tmp1 = load float, float addrspace(1)* %in, align 4
   %bc = bitcast float %tmp1 to <2 x i16>
   %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -29,14 +101,40 @@ define amdgpu_kernel void @scalar_to_vec
   ret void
 }
 
-; GCN-LABEL: {{^}}scalar_to_vector_v4i16:
-; VI: v_lshlrev_b16_e32
-; VI: v_lshlrev_b16_e32
-; VI: v_or_b32_e32
-; VI: v_lshlrev_b32
-; VI: v_or_b32_sdwa
-; VI: v_or_b32_sdwa
 define amdgpu_kernel void @scalar_to_vector_v4i16() {
+; SI-LABEL: scalar_to_vector_v4i16:
+; SI:       ; %bb.0: ; %bb
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v0
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_or_b32_e32 v0, v0, v2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: scalar_to_vector_v4i16:
+; VI:       ; %bb.0: ; %bb
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v0
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
+; VI-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
+; VI-NEXT:    v_lshlrev_b16_e32 v2, 8, v1
+; VI-NEXT:    v_or_b32_e32 v1, v1, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
 bb:
   %tmp = load <2 x i8>, <2 x i8> addrspace(1)* undef, align 1
   %tmp1 = shufflevector <2 x i8> %tmp, <2 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -45,14 +143,40 @@ bb:
   ret void
 }
 
-; GCN-LABEL: {{^}}scalar_to_vector_v4f16:
-; VI: v_lshlrev_b16_e32
-; VI: v_lshlrev_b16_e32
-; VI: v_or_b32_e32
-; VI: v_lshlrev_b32
-; VI: v_or_b32_sdwa
-; VI: v_or_b32_sdwa
 define amdgpu_kernel void @scalar_to_vector_v4f16() {
+; SI-LABEL: scalar_to_vector_v4f16:
+; SI:       ; %bb.0: ; %bb
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v0
+; SI-NEXT:    v_or_b32_e32 v0, v1, v0
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_or_b32_e32 v0, v0, v2
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: scalar_to_vector_v4f16:
+; VI:       ; %bb.0: ; %bb
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v0
+; VI-NEXT:    v_or_b32_e32 v0, v1, v0
+; VI-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
+; VI-NEXT:    v_lshlrev_b16_e32 v2, 8, v1
+; VI-NEXT:    v_or_b32_e32 v1, v1, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_endpgm
 bb:
   %load = load half, half addrspace(1)* undef, align 1
   %tmp = bitcast half %load to <2 x i8>
@@ -101,6 +225,29 @@ bb:
 ; }
 
 define amdgpu_kernel void @scalar_to_vector_test6(<2 x half> addrspace(1)* %out, i8 zeroext %val) nounwind {
+; SI-LABEL: scalar_to_vector_test6:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s0, s2, 0xff
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: scalar_to_vector_test6:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s0, 0xff
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT:    s_endpgm
   %newvec0 = insertelement <4 x i8> undef, i8 %val, i32 0
   %bc = bitcast <4 x i8> %newvec0 to <2 x half>
   store <2 x half> %bc, <2 x half> addrspace(1)* %out

Modified: llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll?rev=367175&r1=367174&r2=367175&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/trunc-combine.ll Sat Jul 27 07:32:23 2019
@@ -1,33 +1,48 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs< %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,SI
+; RUN: llc < %s -march=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,VI
 
 ; Make sure high constant 0 isn't pointlessly materialized
-; GCN-LABEL: {{^}}trunc_bitcast_i64_lshr_32_i16:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mov_b32_e32 v0, v1
-; GCN-NEXT: s_setpc_b64
 define i16 @trunc_bitcast_i64_lshr_32_i16(i64 %bar) {
+; GCN-LABEL: trunc_bitcast_i64_lshr_32_i16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %srl = lshr i64 %bar, 32
   %trunc = trunc i64 %srl to i16
   ret i16 %trunc
 }
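This folds to a single copy because an i64 argument arrives in the VGPR pair v[0:1] with the high word in v1; lshr-by-32 plus trunc is just a high-half extract, so no zero constant needs materializing. An equivalent form in IR, illustrative only:

define i16 @hi_half_extract(i64 %bar) {
  %v = bitcast i64 %bar to <2 x i32>
  %hi = extractelement <2 x i32> %v, i32 1  ; the bits lshr 32 selects
  %trunc = trunc i32 %hi to i16
  ret i16 %trunc
}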
 
-; GCN-LABEL: {{^}}trunc_bitcast_i64_lshr_32_i32:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mov_b32_e32 v0, v1
-; GCN-NEXT: s_setpc_b64
 define i32 @trunc_bitcast_i64_lshr_32_i32(i64 %bar) {
+; GCN-LABEL: trunc_bitcast_i64_lshr_32_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %srl = lshr i64 %bar, 32
   %trunc = trunc i64 %srl to i32
   ret i32 %trunc
 }
 
-; GCN-LABEL: {{^}}trunc_bitcast_v2i32_to_i16:
-; GCN: _load_dword
-; GCN-NOT: _load_dword
-; GCN-NOT: v_mov_b32
-; VI: v_add_u16_e32 v0, 4, v0
 define i16 @trunc_bitcast_v2i32_to_i16(<2 x i32> %bar) {
+; SI-LABEL: trunc_bitcast_v2i32_to_i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: trunc_bitcast_v2i32_to_i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u16_e32 v0, 4, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %load0 = load i32, i32 addrspace(1)* undef
   %load1 = load i32, i32 addrspace(1)* null
   %insert.0 = insertelement <2 x i32> undef, i32 %load0, i32 0
@@ -39,12 +54,24 @@ define i16 @trunc_bitcast_v2i32_to_i16(<
 }
 
 ; Make sure there's no crash if the source vector type is FP
-; GCN-LABEL: {{^}}trunc_bitcast_v2f32_to_i16:
-; GCN: _load_dword
-; GCN-NOT: _load_dword
-; GCN-NOT: v_mov_b32
-; VI: v_add_u16_e32 v0, 4, v0
 define i16 @trunc_bitcast_v2f32_to_i16(<2 x float> %bar) {
+; SI-LABEL: trunc_bitcast_v2f32_to_i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: trunc_bitcast_v2f32_to_i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u16_e32 v0, 4, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %load0 = load float, float addrspace(1)* undef
   %load1 = load float, float addrspace(1)* null
   %insert.0 = insertelement <2 x float> undef, float %load0, i32 0
@@ -55,14 +82,42 @@ define i16 @trunc_bitcast_v2f32_to_i16(<
   ret i16 %add
 }
 
-; GCN-LABEL: {{^}}truncate_high_elt_extract_vector:
-; GCN: s_load_dword s
-; GCN: s_load_dword s
-; GCN: s_sext_i32_i16
-; GCN: s_sext_i32_i16
-; GCN: v_mul_i32_i24
-; GCN: v_lshrrev_b32_e32
 define amdgpu_kernel void @truncate_high_elt_extract_vector(<2 x i16> addrspace(1)* nocapture readonly %arg, <2 x i16> addrspace(1)* nocapture readonly %arg1, <2 x i16> addrspace(1)* nocapture %arg2) local_unnamed_addr {
+; SI-LABEL: truncate_high_elt_extract_vector:
+; SI:       ; %bb.0: ; %bb
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s4, s[4:5], 0x0
+; SI-NEXT:    s_load_dword s5, s[6:7], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sext_i32_i16 s4, s4
+; SI-NEXT:    s_sext_i32_i16 s5, s5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mul_i32_i24_e32 v0, s5, v0
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: truncate_high_elt_extract_vector:
+; VI:       ; %bb.0: ; %bb
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x0
+; VI-NEXT:    s_load_dword s3, s[6:7], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_sext_i32_i16 s0, s2
+; VI-NEXT:    s_sext_i32_i16 s1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mul_i32_i24_e32 v2, s1, v2
+; VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
 bb:
   %tmp = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %arg, i64 undef
   %tmp3 = load <2 x i16>, <2 x i16> addrspace(1)* %tmp, align 4
@@ -82,17 +137,22 @@ bb:
   ret void
 }
 
-; GCN-LABEL: {{^}}trunc_v2i64_arg_to_v2i16:
-; GCN: v_lshlrev_b32_e32 v1, 16, v2
-
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-
-; GCN-NEXT: s_setpc_b64
 define <2 x i16> @trunc_v2i64_arg_to_v2i16(<2 x i64> %arg0) #0 {
+; SI-LABEL: trunc_v2i64_arg_to_v2i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
+; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: trunc_v2i64_arg_to_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %trunc = trunc <2 x i64> %arg0 to <2 x i16>
   ret <2 x i16> %trunc
 }
